From 889c80b682d01cb2ff87b20b36b237b817d786c4 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 29 Jul 2015 00:45:06 -0700 Subject: [PATCH 1/9] Rebase on rancher/netconf --- cmd/cloudinit/cloudinit.go | 6 +- cmd/network/ipv4ll.go | 75 ----------------- cmd/network/network.go | 161 +------------------------------------ config/types.go | 25 +----- 4 files changed, 9 insertions(+), 258 deletions(-) delete mode 100644 cmd/network/ipv4ll.go diff --git a/cmd/cloudinit/cloudinit.go b/cmd/cloudinit/cloudinit.go index 560457e2..e3322462 100644 --- a/cmd/cloudinit/cloudinit.go +++ b/cmd/cloudinit/cloudinit.go @@ -38,8 +38,8 @@ import ( "github.com/coreos/coreos-cloudinit/initialize" "github.com/coreos/coreos-cloudinit/pkg" "github.com/coreos/coreos-cloudinit/system" + "github.com/rancher/netconf" "github.com/rancherio/os/cmd/cloudinit/hostname" - rancherNetwork "github.com/rancherio/os/cmd/network" rancherConfig "github.com/rancherio/os/config" "github.com/rancherio/os/util" ) @@ -402,8 +402,8 @@ func getDatasources(cfg *rancherConfig.CloudConfig) []datasource.Datasource { } func enableDoLinkLocal() { - err := rancherNetwork.ApplyNetworkConfigs(&rancherConfig.NetworkConfig{ - Interfaces: map[string]rancherConfig.InterfaceConfig{ + err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{ + Interfaces: map[string]netconf.InterfaceConfig{ "eth0": { IPV4LL: true, }, diff --git a/cmd/network/ipv4ll.go b/cmd/network/ipv4ll.go deleted file mode 100644 index 301fd353..00000000 --- a/cmd/network/ipv4ll.go +++ /dev/null @@ -1,75 +0,0 @@ -package network - -import ( - "encoding/binary" - "fmt" - "math/rand" - "net" - - log "github.com/Sirupsen/logrus" - - "github.com/j-keck/arping" - "github.com/vishvananda/netlink" -) - -func AssignLinkLocalIP(link netlink.Link) error { - ifaceName := link.Attrs().Name - iface, err := net.InterfaceByName(ifaceName) - if err != nil { - log.Error("could not get information about interface") - return err - } - addrs, err := iface.Addrs() - if err != nil { - log.Error("Error fetching existing ip on interface") - } - for _, addr := range addrs { - if addr.String()[:7] == "169.254" { - log.Info("Link Local IP already set on interface") - return nil - } - } - randSource, err := getPseudoRandomGenerator(link.Attrs().HardwareAddr) - if err != nil { - return err - } - // try a random address upto 10 times - for i := 0; i < 10; i++ { - randGenerator := rand.New(*randSource) - randomNum := randGenerator.Uint32() - dstIP := getNewIPV4LLAddr(randomNum) - if dstIP[2] == 0 || dstIP[2] == 255 { - i-- - continue - } - _, _, err := arping.PingOverIfaceByName(dstIP, ifaceName) - if err != nil { - // this ip is not being used - addr, err := netlink.ParseAddr(dstIP.String() + "/16") - if err != nil { - log.Errorf("error while parsing ipv4ll addr, err = %v", err) - return err - } - if err := netlink.AddrAdd(link, addr); err != nil { - log.Error("ipv4ll addr add failed") - return err - } - log.Infof("Set %s on %s", dstIP.String(), link.Attrs().Name) - return nil - } - } - log.Error("Could not find a suitable ipv4ll") - return fmt.Errorf("Could not find a suitable ipv4ll") -} - -func getNewIPV4LLAddr(randomNum uint32) net.IP { - byte1 := randomNum & 255 // use least significant 8 bits - byte2 := randomNum >> 24 // use most significant 8 bits - return []byte{169, 254, byte(byte1), byte(byte2)} -} - -func getPseudoRandomGenerator(haAddr []byte) (*rand.Source, error) { - seed, _ := binary.Varint(haAddr) - src := rand.NewSource(seed) - return &src, nil -} diff --git a/cmd/network/network.go 
b/cmd/network/network.go index 36f15765..04e9505c 100644 --- a/cmd/network/network.go +++ b/cmd/network/network.go @@ -1,20 +1,13 @@ package network import ( - "bytes" - "errors" "fmt" - "net" "os" - "os/exec" - "strings" log "github.com/Sirupsen/logrus" + "github.com/rancher/netconf" "github.com/rancherio/os/config" - "github.com/rancherio/os/docker" - "github.com/ryanuber/go-glob" - "github.com/vishvananda/netlink" ) func Main() { @@ -27,153 +20,7 @@ func Main() { if err != nil { log.Fatal(err) } - ApplyNetworkConfigs(&cfg.Rancher.Network) -} - -func createInterfaces(netCfg *config.NetworkConfig) error { - for name, iface := range netCfg.Interfaces { - if !iface.Bridge { - continue - } - - bridge := netlink.Bridge{} - bridge.LinkAttrs.Name = name - - if err := netlink.LinkAdd(&bridge); err != nil { - log.Errorf("Failed to create bridge %s: %v", name, err) - } - } - - return nil -} - -func ApplyNetworkConfigs(netCfg *config.NetworkConfig) error { - if err := createInterfaces(netCfg); err != nil { - return err - } - - links, err := netlink.LinkList() - if err != nil { - return err - } - - //apply network config - for _, link := range links { - linkName := link.Attrs().Name - var match config.InterfaceConfig - - for key, netConf := range netCfg.Interfaces { - if netConf.Match == "" { - netConf.Match = key - } - - if netConf.Match == "" { - continue - } - - if len(netConf.Match) > 4 && strings.ToLower(netConf.Match[:3]) == "mac" { - haAddr, err := net.ParseMAC(netConf.Match[4:]) - if err != nil { - return err - } - if bytes.Compare(haAddr, link.Attrs().HardwareAddr) == 0 { - // MAC address match is used over all other matches - match = netConf - break - } - } - - // "" means match has not been found - if match.Match == "" && matches(linkName, netConf.Match) { - match = netConf - } - - if netConf.Match == linkName { - // Found exact match, use it over wildcard match - match = netConf - } - } - - if match.Match != "" { - err = applyNetConf(link, match) - if err != nil { - log.Errorf("Failed to apply settings to %s : %v", linkName, err) - } - } - } - - if err != nil { - return err - } - - //post run - if netCfg.PostRun != nil { - return docker.StartAndWait(config.DOCKER_SYSTEM_HOST, netCfg.PostRun) - } - return nil -} - -func applyNetConf(link netlink.Link, netConf config.InterfaceConfig) error { - if netConf.DHCP { - log.Infof("Running DHCP on %s", link.Attrs().Name) - cmd := exec.Command("dhcpcd", "-A4", "-e", "force_hostname=true", link.Attrs().Name) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - log.Error(err) - } - } else if netConf.IPV4LL { - if err := AssignLinkLocalIP(link); err != nil { - log.Error("IPV4LL set failed") - return err - } - } else if netConf.Address == "" { - return nil - } else { - addr, err := netlink.ParseAddr(netConf.Address) - if err != nil { - return err - } - if err := netlink.AddrAdd(link, addr); err != nil { - log.Error("addr add failed") - return err - } - log.Infof("Set %s on %s", netConf.Address, link.Attrs().Name) - } - - if netConf.MTU > 0 { - if err := netlink.LinkSetMTU(link, netConf.MTU); err != nil { - log.Error("set MTU Failed") - return err - } - } - - if err := netlink.LinkSetUp(link); err != nil { - log.Error("failed to setup link") - return err - } - - if netConf.Gateway != "" { - gatewayIp := net.ParseIP(netConf.Gateway) - if gatewayIp == nil { - return errors.New("Invalid gateway address " + netConf.Gateway) - } - - route := netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - Gw: net.ParseIP(netConf.Gateway), 
- } - if err := netlink.RouteAdd(&route); err != nil { - log.Error("gateway set failed") - return err - } - - log.Infof("Set default gateway %s", netConf.Gateway) - } - - return nil -} - -func matches(link, conf string) bool { - return glob.Glob(conf, link) + if err := netconf.ApplyNetworkConfigs(&cfg.Rancher.Network); err != nil { + log.Fatal(err) + } } diff --git a/config/types.go b/config/types.go index 226468dc..29445ecb 100644 --- a/config/types.go +++ b/config/types.go @@ -2,6 +2,7 @@ package config import ( "github.com/coreos/coreos-cloudinit/config" + "github.com/rancher/netconf" "github.com/rancherio/rancher-compose/librcompose/project" ) @@ -76,7 +77,7 @@ type RancherConfig struct { Disable []string `yaml:"disable,omitempty"` ServicesInclude map[string]bool `yaml:"services_include,omitempty"` Modules []string `yaml:"modules,omitempty"` - Network NetworkConfig `yaml:"network,omitempty"` + Network netconf.NetworkConfig `yaml:"network,omitempty"` Repositories Repositories `yaml:"repositories,omitempty"` Ssh SshConfig `yaml:"ssh,omitempty"` State StateConfig `yaml:"state,omitempty"` @@ -97,28 +98,6 @@ type UpgradeConfig struct { Rollback string `yaml:"rollback,omitempty"` } -type DnsConfig struct { - Nameservers []string `yaml:"nameservers,flow,omitempty"` - Search []string `yaml:"search,flow,omitempty"` - Domain string `yaml:"domain,omitempty"` -} - -type NetworkConfig struct { - Dns DnsConfig `yaml:"dns,omitempty"` - Interfaces map[string]InterfaceConfig `yaml:"interfaces,omitempty"` - PostRun *ContainerConfig `yaml:"post_run,omitempty"` -} - -type InterfaceConfig struct { - Match string `yaml:"match,omitempty"` - DHCP bool `yaml:"dhcp,omitempty"` - Address string `yaml:"address,omitempty"` - IPV4LL bool `yaml:"ipv4ll,omitempty"` - Gateway string `yaml:"gateway,omitempty"` - MTU int `yaml:"mtu,omitempty"` - Bridge bool `yaml:"bridge,omitempty"` -} - type DockerConfig struct { TLS bool `yaml:"tls,omitempty"` TLSArgs []string `yaml:"tls_args,flow,omitempty"` From 19f9a1b281b3435c52c32f7d2a1a8453fd91fca8 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Wed, 29 Jul 2015 00:51:49 -0700 Subject: [PATCH 2/9] Rebase on docker-from-scratch --- config/types.go | 7 +- init/bootstrap.go | 25 ++- init/init.go | 408 ++++++++++++---------------------------------- init/root.go | 96 +++++++++++ main.go | 3 +- os-config.yml | 31 ++-- scripts/run | 9 +- 7 files changed, 237 insertions(+), 342 deletions(-) create mode 100644 init/root.go diff --git a/config/types.go b/config/types.go index 29445ecb..eb08cf7b 100644 --- a/config/types.go +++ b/config/types.go @@ -9,15 +9,16 @@ import ( const ( CONSOLE_CONTAINER = "console" DOCKER_BIN = "/usr/bin/docker" + ROS_BIN = "/usr/bin/ros" + SYSINIT_BIN = "/usr/bin/ros-sysinit" DOCKER_SYSTEM_HOME = "/var/lib/system-docker" DOCKER_SYSTEM_HOST = "unix:///var/run/system-docker.sock" DOCKER_HOST = "unix:///var/run/docker.sock" - IMAGES_PATH = "/" + IMAGES_PATH = "/usr/share/ros" IMAGES_PATTERN = "images*.tar" - SYS_INIT = "/sbin/init-sys" - USER_INIT = "/sbin/init-user" MODULES_ARCHIVE = "/modules.tar" DEBUG = false + SYSTEM_DOCKER_LOG = "/var/log/system-docker.log" LABEL = "label" HASH = "io.rancher.os.hash" diff --git a/init/bootstrap.go b/init/bootstrap.go index 1d64fa71..1ed20b73 100644 --- a/init/bootstrap.go +++ b/init/bootstrap.go @@ -2,16 +2,17 @@ package init import ( "os" - "os/exec" "syscall" "fmt" + "strings" + log "github.com/Sirupsen/logrus" + "github.com/rancher/docker-from-scratch" "github.com/rancherio/os/config" "github.com/rancherio/os/docker" 
"github.com/rancherio/os/util" "github.com/rancherio/rancher-compose/librcompose/project" - "strings" ) func autoformat(cfg *config.CloudConfig) error { @@ -31,20 +32,14 @@ func runBootstrapContainers(cfg *config.CloudConfig) error { return docker.RunServices("bootstrap", cfg, cfg.Rancher.BootstrapContainers) } -func startDocker(cfg *config.CloudConfig) (chan interface{}, error) { - for _, d := range []string{config.DOCKER_SYSTEM_HOST, "/var/run"} { - err := os.MkdirAll(d, 0700) - if err != nil { - return nil, err - } - } +func startDocker(cfg *config.Config) (chan interface{}, error) { - cmd := exec.Command(cfg.Rancher.BootstrapDocker.Args[0], cfg.Rancher.BootstrapDocker.Args[1:]...) - if cfg.Rancher.Debug { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - err := cmd.Start() + launchConfig, args := getLaunchConfig(cfg, &cfg.Rancher.BootstrapDocker) + launchConfig.Fork = true + launchConfig.LogFile = "" + launchConfig.NoLog = true + + cmd, err := dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...) if err != nil { return nil, err } diff --git a/init/init.go b/init/init.go index 26dc4eec..deb28a70 100644 --- a/init/init.go +++ b/init/init.go @@ -3,203 +3,54 @@ package init import ( + "bufio" "fmt" - "io/ioutil" "os" "os/exec" "strings" - "syscall" log "github.com/Sirupsen/logrus" - "github.com/rancherio/os/cmd/network" + "github.com/rancher/docker-from-scratch" "github.com/rancherio/os/config" "github.com/rancherio/os/util" ) const ( - STATE string = "/var" - SYSTEM_DOCKER string = "/usr/bin/system-docker" - DOCKER string = "/usr/bin/docker" - SYSINIT string = "/sbin/rancher-sysinit" + STATE string = "/state" ) var ( - dirs []string = []string{ - "/etc/ssl/certs", - "/sbin", - "/usr/bin", - "/usr/sbin", - } - postDirs []string = []string{ - "/var/log", - "/var/lib/rancher/state/home", - "/var/lib/rancher/state/opt", - } - mounts [][]string = [][]string{ - {"devtmpfs", "/dev", "devtmpfs", ""}, - {"none", "/dev/pts", "devpts", ""}, - {"none", "/etc/docker", "tmpfs", ""}, - {"none", "/proc", "proc", ""}, - {"none", "/run", "tmpfs", ""}, - {"none", "/sys", "sysfs", ""}, - {"none", "/sys/fs/cgroup", "tmpfs", ""}, - } - postMounts [][]string = [][]string{ - {"none", "/var/run", "tmpfs", ""}, - } - cgroups []string = []string{ - "blkio", - "cpu", - "cpuacct", - "cpuset", - "devices", - "freezer", - "memory", - "net_cls", - "perf_event", - } - // Notice this map is the reverse order of a "ln -s x y" command - // so map[y] = x - symlinks map[string]string = map[string]string{ - "/etc/ssl/certs/ca-certificates.crt": "/ca.crt", - "/sbin/modprobe": "/busybox", - "/usr/sbin/iptables": "/xtables-multi", - DOCKER: "/docker", - SYSTEM_DOCKER: "/docker", - SYSINIT: "/init", - "/home": "/var/lib/rancher/state/home", - "/opt": "/var/lib/rancher/state/opt", + mountConfig = dockerlaunch.Config{ + CgroupHierarchy: map[string]string{ + "cpu": "cpu", + "cpuacct": "cpu", + "net_cls": "net_cls", + "net_prio": "net_cls", + }, } ) -func createSymlinks(cfg *config.CloudConfig, symlinks map[string]string) error { - log.Debug("Creating symlinking") - for dest, src := range symlinks { - if _, err := os.Stat(dest); os.IsNotExist(err) { - log.Debugf("Symlinking %s => %s", src, dest) - if err = os.Symlink(src, dest); err != nil { - return err - } - } - } +func loadModules(cfg *config.Config) error { + mounted := map[string]bool{} - return nil -} - -func createDirs(dirs ...string) error { - for _, dir := range dirs { - if _, err := os.Stat(dir); os.IsNotExist(err) { - log.Debugf("Creating %s", dir) - err 
= os.MkdirAll(dir, 0755) - if err != nil { - return err - } - } - } - - return nil -} - -func createMounts(mounts ...[]string) error { - for _, mount := range mounts { - log.Debugf("Mounting %s %s %s %s", mount[0], mount[1], mount[2], mount[3]) - err := util.Mount(mount[0], mount[1], mount[2], mount[3]) - if err != nil { - return err - } - } - - return nil -} - -func remountRo(cfg *config.CloudConfig) error { - log.Info("Remouting root read only") - return util.Remount("/", "ro") -} - -func mountCgroups(cfg *config.CloudConfig) error { - for _, cgroup := range cgroups { - err := createDirs("/sys/fs/cgroup/" + cgroup) - if err != nil { - return err - } - - err = createMounts([][]string{ - {"none", "sys/fs/cgroup/" + cgroup, "cgroup", cgroup}, - }...) - if err != nil { - return err - } - } - - log.Debug("Done mouting cgroupfs") - - return nil -} - -func extractModules(cfg *config.CloudConfig) error { - if _, err := os.Stat(config.MODULES_ARCHIVE); os.IsNotExist(err) { - log.Debug("Modules do not exist") - return nil - } - - log.Debug("Extracting modules") - return util.ExtractTar(config.MODULES_ARCHIVE, "/") -} - -func setResolvConf(cfg *config.CloudConfig) error { - log.Debug("Creating /etc/resolv.conf") - //f, err := os.OpenFile("/etc/resolv.conf", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - f, err := os.Create("/etc/resolv.conf") + f, err := os.Open("/proc/modules") if err != nil { return err } - defer f.Close() - for _, dns := range cfg.Rancher.Network.Dns.Nameservers { - content := fmt.Sprintf("nameserver %s\n", dns) - if _, err = f.Write([]byte(content)); err != nil { - return err + reader := bufio.NewScanner(f) + for reader.Scan() { + mounted[strings.SplitN(reader.Text(), " ", 2)[0]] = true + } + + for _, module := range cfg.Modules { + if mounted[module] { + continue } - } - search := strings.Join(cfg.Rancher.Network.Dns.Search, " ") - if search != "" { - content := fmt.Sprintf("search %s\n", search) - if _, err = f.Write([]byte(content)); err != nil { - return err - } - } - - if cfg.Rancher.Network.Dns.Domain != "" { - content := fmt.Sprintf("domain %s\n", cfg.Rancher.Network.Dns.Domain) - if _, err = f.Write([]byte(content)); err != nil { - return err - } - } - - return nil -} - -func loadModules(cfg *config.CloudConfig) error { - filesystems, err := ioutil.ReadFile("/proc/filesystems") - if err != nil { - return err - } - - if !strings.Contains(string(filesystems), "nodev\toverlay\n") { - log.Debug("Loading overlay module") - err = exec.Command("/sbin/modprobe", "overlay").Run() - if err != nil { - return err - } - } - - for _, module := range cfg.Rancher.Modules { log.Debugf("Loading module %s", module) - err = exec.Command("/sbin/modprobe", module).Run() - if err != nil { + if err := exec.Command("modprobe", module).Run(); err != nil { log.Errorf("Could not load module %s, err %v", module, err) } } @@ -207,19 +58,18 @@ func loadModules(cfg *config.CloudConfig) error { return nil } -func sysInit(cfg *config.CloudConfig) error { - args := append([]string{SYSINIT}, os.Args[1:]...) +func sysInit(cfg *config.Config) error { + args := append([]string{config.SYSINIT_BIN}, os.Args[1:]...) - var cmd *exec.Cmd - if util.IsRunningInTty() { - cmd = exec.Command(args[0], args[1:]...) - cmd.Stdin = os.Stdin - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - } else { - cmd = exec.Command(args[0], args[1:]...) 
+ cmd := &exec.Cmd{ + Path: config.ROS_BIN, + Args: args, } + cmd.Stdin = os.Stdin + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + if err := cmd.Start(); err != nil { return err } @@ -227,128 +77,88 @@ func sysInit(cfg *config.CloudConfig) error { return os.Stdin.Close() } -func execDocker(cfg *config.CloudConfig) error { - log.Info("Launching System Docker") - if !cfg.Rancher.Debug { - output, err := os.Create("/var/log/system-docker.log") - if err != nil { - return err - } - - syscall.Dup2(int(output.Fd()), int(os.Stdout.Fd())) - syscall.Dup2(int(output.Fd()), int(os.Stderr.Fd())) - } - - os.Stdin.Close() - return syscall.Exec(SYSTEM_DOCKER, cfg.Rancher.SystemDocker.Args, os.Environ()) -} - func MainInit() { if err := RunInit(); err != nil { log.Fatal(err) } } -func mountStateTmpfs(cfg *config.CloudConfig) error { - log.Debugf("State will not be persisted") - return util.Mount("none", STATE, "tmpfs", "") -} - -func mountState(cfg *config.CloudConfig) error { +func mountState(cfg *config.Config) error { var err error - if cfg.Rancher.State.Dev != "" { - dev := util.ResolveDevice(cfg.Rancher.State.Dev) - if dev == "" { - msg := fmt.Sprintf("Could not resolve device %q", cfg.Rancher.State.Dev) - log.Infof(msg) - return fmt.Errorf(msg) - } - log.Infof("Mounting state device %s to %s", dev, STATE) - - fsType := cfg.Rancher.State.FsType - if fsType == "auto" { - fsType, err = util.GetFsType(dev) - } - - if err == nil { - log.Debugf("FsType has been set to %s", fsType) - err = util.Mount(dev, STATE, fsType, "") - } - } else { - return mountStateTmpfs(cfg) - } - - return err -} - -func tryMountAndBootstrap(cfg *config.CloudConfig) error { - if err := mountState(cfg); err != nil { - if err := bootstrap(cfg); err != nil { - if cfg.Rancher.State.Required { - return err - } - return mountStateTmpfs(cfg) - } - if err := mountState(cfg); err != nil { - if cfg.Rancher.State.Required { - return err - } - return mountStateTmpfs(cfg) - } - } - return nil -} - -func createGroups(cfg *config.CloudConfig) error { - return ioutil.WriteFile("/etc/group", []byte("root:x:0:\n"), 0644) -} - -func touchSocket(cfg *config.CloudConfig) error { - for _, path := range []string{"/var/run/docker.sock", "/var/run/system-docker.sock"} { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return err - } - err := ioutil.WriteFile(path, []byte{}, 0700) - if err != nil { - return err - } - } - - return nil -} - -func setupSystemBridge(cfg *config.CloudConfig) error { - bridge, cidr := cfg.Rancher.SystemDocker.BridgeConfig() - if bridge == "" { + if cfg.State.Dev == "" { return nil } - return network.ApplyNetworkConfigs(&config.NetworkConfig{ - Interfaces: map[string]config.InterfaceConfig{ - bridge: { - Bridge: true, - Address: cidr, - }, - }, - }) + dev := util.ResolveDevice(cfg.State.Dev) + if dev == "" { + return fmt.Errorf("Could not resolve device %q", cfg.State.Dev) + } + fsType := cfg.State.FsType + if fsType == "auto" { + fsType, err = util.GetFsType(dev) + } + + if err != nil { + return err + } + + log.Debugf("FsType has been set to %s", fsType) + log.Infof("Mounting state device %s to %s", dev, STATE) + return util.Mount(dev, STATE, fsType, "") +} + +func tryMountState(cfg *config.Config) error { + if mountState(cfg) == nil { + return nil + } + + // If we failed to mount lets run bootstrap and try again + if err := bootstrap(cfg); err != nil { + return err + } + + return mountState(cfg) +} + +func tryMountAndBootstrap(cfg *config.Config) error { + if err := tryMountState(cfg); 
!cfg.State.Required && err != nil { + return nil + } else if err != nil { + return err + } + + log.Debugf("Switching to new root at %s", STATE) + return switchRoot(STATE) +} + +func getLaunchConfig(cfg *config.Config, dockerCfg *config.DockerConfig) (*dockerlaunch.Config, []string) { + var launchConfig dockerlaunch.Config + + args := dockerlaunch.ParseConfig(&launchConfig, append(dockerCfg.Args, dockerCfg.ExtraArgs...)...) + + launchConfig.DnsConfig.Nameservers = cfg.Network.Dns.Nameservers + launchConfig.DnsConfig.Search = cfg.Network.Dns.Search + + if !cfg.Debug { + launchConfig.LogFile = config.SYSTEM_DOCKER_LOG + } + + return &launchConfig, args } func RunInit() error { - var cfg config.CloudConfig + var cfg config.Config os.Setenv("PATH", "/sbin:/usr/sbin:/usr/bin") + // Magic setting to tell Docker to do switch_root and not pivot_root os.Setenv("DOCKER_RAMDISK", "true") initFuncs := []config.InitFunc{ - func(cfg *config.CloudConfig) error { - return createDirs(dirs...) + func(cfg *config.Config) error { + return dockerlaunch.PrepareFs(&mountConfig) }, - func(cfg *config.CloudConfig) error { - log.Info("Setting up mounts") - return createMounts(mounts...) - }, - func(cfg *config.CloudConfig) error { + func(cfg *config.Config) error { newCfg, err := config.LoadConfig() if err == nil { newCfg, err = config.LoadConfig() @@ -357,37 +167,21 @@ func RunInit() error { *cfg = *newCfg } - if cfg.Rancher.Debug { + if cfg.Debug { cfgString, _ := config.Dump(false, true) - log.Debugf("os-config dump: \n%s", cfgString) + if cfgString != "" { + log.Debugf("Config: %s", cfgString) + } } return err }, - mountCgroups, - func(cfg *config.CloudConfig) error { - return createSymlinks(cfg, symlinks) - }, - createGroups, - extractModules, loadModules, - setResolvConf, - setupSystemBridge, tryMountAndBootstrap, - func(cfg *config.CloudConfig) error { + func(cfg *config.Config) error { return cfg.Reload() }, loadModules, - setResolvConf, - func(cfg *config.CloudConfig) error { - return createDirs(postDirs...) - }, - func(cfg *config.CloudConfig) error { - return createMounts(postMounts...) - }, - touchSocket, - // Disable R/O root write now to support updating modules - //remountRo, sysInit, } @@ -395,5 +189,9 @@ func RunInit() error { return err } - return execDocker(&cfg) + launchConfig, args := getLaunchConfig(&cfg, &cfg.SystemDocker) + + log.Info("Launching System Docker") + _, err := dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...) 
+ return err } diff --git a/init/root.go b/init/root.go new file mode 100644 index 00000000..241bd2a4 --- /dev/null +++ b/init/root.go @@ -0,0 +1,96 @@ +package init + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/rancher/docker-from-scratch" + "github.com/rancherio/os/config" +) + +func prepareRoot(rootfs string) error { + usr := path.Join(rootfs, "usr") + if err := os.Remove(usr); err != nil && !os.IsNotExist(err) { + log.Errorf("Failed to delete %s, possibly invalid RancherOS state partition: %v", usr, err) + return err + } + + return nil +} + +func copyMoveRoot(rootfs string) error { + usrVer := fmt.Sprintf("usr-%s", config.VERSION) + usr := path.Join(rootfs, usrVer) + + if err := archive.CopyWithTar("/usr", usr); err != nil { + return err + } + + if err := dockerlaunch.CreateSymlink(usrVer, path.Join(rootfs, "usr")); err != nil { + return err + } + + files, err := ioutil.ReadDir("/") + if err != nil { + return err + } + + for _, file := range files { + filename := path.Join("/", file.Name()) + + if filename == rootfs { + continue + } + + log.Debugf("Deleting %s", filename) + //if err := os.Remove(filename); err != nil { + if err := os.RemoveAll(filename); err != nil { + return err + } + //} + } + + return nil +} + +func switchRoot(rootfs string) error { + for _, i := range []string{"/dev", "/sys", "/proc", "/run"} { + log.Debugf("Moving mount %s to %s", i, path.Join(rootfs, i)) + if err := os.MkdirAll(path.Join(rootfs, i), 0755); err != nil { + return err + } + if err := syscall.Mount(i, path.Join(rootfs, i), "", syscall.MS_MOVE, ""); err != nil { + return err + } + } + + if err := copyMoveRoot(rootfs); err != nil { + return err + } + + if err := syscall.Chdir(rootfs); err != nil { + return err + } + + if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { + return err + } + + if err := syscall.Chroot("."); err != nil { + return err + } + + if err := syscall.Chdir("/"); err != nil { + return err + } + + log.Debugf("Successfully moved to new root at %s", rootfs) + os.Setenv("DOCKER_RAMDISK", "false") + + return nil +} diff --git a/main.go b/main.go index 6f65c249..3b8546af 100644 --- a/main.go +++ b/main.go @@ -14,6 +14,7 @@ import ( "github.com/rancherio/os/cmd/sysinit" "github.com/rancherio/os/cmd/systemdocker" "github.com/rancherio/os/cmd/wait" + "github.com/rancherio/os/config" osInit "github.com/rancherio/os/init" ) @@ -37,7 +38,7 @@ func registerCmd(cmd string, mainFunc func()) { func main() { registerCmd("/init", osInit.MainInit) - registerCmd(osInit.SYSINIT, sysinit.Main) + registerCmd(config.SYSINIT_BIN, sysinit.Main) registerCmd("/usr/bin/system-docker", systemdocker.Main) registerCmd("/sbin/poweroff", power.PowerOff) registerCmd("/sbin/reboot", power.Reboot) diff --git a/os-config.yml b/os-config.yml index 852a5efb..206aecfa 100644 --- a/os-config.yml +++ b/os-config.yml @@ -38,8 +38,8 @@ rancher: - /lib/modules:/lib/modules - /lib/firmware:/lib/firmware bootstrap_docker: - args: [docker, -d, -s, overlay, -b, none, --restart=false, -g, /var/lib/system-docker, - -G, root, -H, 'unix:///var/run/system-docker.sock'] + args: [-d, -s, overlay, -b, none, --restart=false, -g, /var/lib/system-docker, + -G, root, -H, 'unix:///var/run/system-docker.sock', --userland-proxy=false] cloud_init: datasources: - configdrive:/media/config-2 @@ -90,6 +90,7 @@ rancher: io.rancher.os.reloadconfig: true io.rancher.os.scope: system links: + - preload-user-images - 
cloud-init-pre - network net: host @@ -124,17 +125,17 @@ rancher: privileged: true read_only: true volumes: - - /init:/sbin/halt:ro - - /init:/sbin/poweroff:ro - - /init:/sbin/reboot:ro - - /init:/sbin/shutdown:ro - - /init:/sbin/netconf:ro - - /init:/usr/bin/cloud-init:ro - - /init:/usr/bin/rancherctl:ro - - /init:/usr/bin/ros:ro - - /init:/usr/bin/respawn:ro - - /init:/usr/bin/system-docker:ro - - /init:/usr/sbin/wait-for-docker:ro + - /usr/bin/ros:/sbin/halt:ro + - /usr/bin/ros:/sbin/poweroff:ro + - /usr/bin/ros:/sbin/reboot:ro + - /usr/bin/ros:/sbin/shutdown:ro + - /usr/bin/ros:/sbin/netconf:ro + - /usr/bin/ros:/usr/bin/cloud-init:ro + - /usr/bin/ros:/usr/bin/rancherctl:ro + - /usr/bin/ros:/usr/bin/ros:ro + - /usr/bin/ros:/usr/bin/respawn:ro + - /usr/bin/ros:/usr/bin/system-docker:ro + - /usr/bin/ros:/usr/sbin/wait-for-docker:ro - /lib/modules:/lib/modules - /usr/bin/docker:/usr/bin/docker:ro console: @@ -144,7 +145,6 @@ rancher: io.rancher.os.scope: system links: - cloud-init - - dockerwait # because console runs `loud-init -execute`, which may need docker net: host uts: host pid: host @@ -158,7 +158,6 @@ rancher: labels: io.rancher.os.scope: system links: - - cloud-init - network net: host uts: host @@ -298,7 +297,7 @@ rancher: - /home:/home - /opt:/opt system_docker: - args: [docker, -d, --log-driver, syslog, -s, overlay, -b, docker-sys, --fixed-cidr, + args: [docker, -d, -s, overlay, -b, docker-sys, --fixed-cidr, 172.18.42.1/16, --restart=false, -g, /var/lib/system-docker, -G, root, -H, 'unix:///var/run/system-docker.sock', --userland-proxy=false] upgrade: diff --git a/scripts/run b/scripts/run index 869aa8f8..b233d586 100755 --- a/scripts/run +++ b/scripts/run @@ -64,10 +64,15 @@ if [ ! -d ${INITRD_TMP} ]; then popd fi +if [ -e ${INITRD_CURRENT} ]; then + rm -f ${INITRD_CURRENT} +fi + ln -sf ${INITRD_TMP} ${INITRD_CURRENT} -cp bin/rancheros ${INITRD_TMP}/init -cp -f os-config.yml ${INITRD_TMP}/ +mkdir -p ${INITRD_TMP}/usr/{bin,share/ros} +cp bin/rancheros ${INITRD_TMP}/usr/bin/ros +cp -f os-config.yml ${INITRD_TMP}/usr/share/ros cd ${INITRD_TMP} find . 
| cpio -H newc -o > ${INITRD_TEST} From 9d76b79ac36e879655455d40713d645217386027 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Tue, 4 Aug 2015 14:45:38 -0700 Subject: [PATCH 3/9] Refactor to use libcompose --- cmd/control/cli.go | 6 - cmd/control/os.go | 81 +++-- cmd/control/reload.go | 53 ---- cmd/control/service.go | 4 +- compose/project.go | 162 ++++++++++ config/config.go | 36 +-- config/types.go | 12 +- docker/client.go | 29 +- docker/client_factory.go | 94 ++++++ docker/container.go | 632 -------------------------------------- docker/container_test.go | 307 ------------------ docker/env.go | 55 ++++ docker/factory.go | 103 ------- docker/service.go | 159 ++++++++++ docker/service_factory.go | 27 ++ docker/services.go | 137 --------- docker/util.go | 11 + init/bootstrap.go | 11 +- init/init.go | 42 +-- init/root.go | 5 +- init/sysinit.go | 42 +-- main.go | 2 + os-config.yml | 44 +-- scripts/run | 2 +- util/backoff.go | 54 ++++ util/util.go | 13 + 26 files changed, 665 insertions(+), 1458 deletions(-) delete mode 100644 cmd/control/reload.go create mode 100644 compose/project.go create mode 100644 docker/client_factory.go delete mode 100644 docker/container.go delete mode 100644 docker/container_test.go create mode 100644 docker/env.go delete mode 100644 docker/factory.go create mode 100644 docker/service.go create mode 100644 docker/service_factory.go delete mode 100644 docker/services.go create mode 100644 docker/util.go create mode 100644 util/backoff.go diff --git a/cmd/control/cli.go b/cmd/control/cli.go index 1ba70fe6..bc7314f0 100644 --- a/cmd/control/cli.go +++ b/cmd/control/cli.go @@ -47,12 +47,6 @@ func Main() { HideHelp: true, Subcommands: serviceSubCommands(), }, - //{ - // Name: "reload", - // ShortName: "a", - // Usage: "reload configuration of a service and restart the container", - // Action: reload, - //}, { Name: "os", Usage: "operating system upgrade/downgrade", diff --git a/cmd/control/os.go b/cmd/control/os.go index 76d4f7cd..81453412 100644 --- a/cmd/control/os.go +++ b/cmd/control/os.go @@ -15,7 +15,9 @@ import ( dockerClient "github.com/fsouza/go-dockerclient" "github.com/codegangsta/cli" + "github.com/docker/libcompose/project" "github.com/rancherio/os/cmd/power" + "github.com/rancherio/os/compose" "github.com/rancherio/os/config" "github.com/rancherio/os/docker" ) @@ -147,7 +149,9 @@ func osUpgrade(c *cli.Context) { if c.Args().Present() { log.Fatalf("invalid arguments %v", c.Args()) } - startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot")) + if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot")); err != nil { + log.Fatal(err) + } } func osVersion(c *cli.Context) { @@ -164,22 +168,28 @@ func yes(in *bufio.Reader, question string) bool { return strings.ToLower(line[0:1]) == "y" } -func startUpgradeContainer(image string, stage, force, reboot bool) { +func startUpgradeContainer(image string, stage, force, reboot bool) error { in := bufio.NewReader(os.Stdin) - container := docker.NewContainer(config.DOCKER_SYSTEM_HOST, &config.ContainerConfig{ - Cmd: "--name=os-upgrade " + - "--log-driver=json-file " + - "--rm " + - "--privileged " + - "--net=host " + - image + " " + - "-t rancher-upgrade " + - "-r " + config.VERSION, - }).Stage() + container, err := compose.CreateService(nil, "os-upgrade", &project.ServiceConfig{ + LogDriver: "json-file", + Privileged: true, + Net: "host", + Image: image, + Labels: project.NewSliceorMap(map[string]string{ + config.SCOPE: config.SYSTEM, + }), + Command: 
project.NewCommand( + "-t", "rancher-upgrade", + "-r", config.VERSION, + ), + }) + if err != nil { + return err + } - if container.Err != nil { - log.Fatal(container.Err) + if err := container.Pull(); err != nil { + return err } if !stage { @@ -191,46 +201,25 @@ func startUpgradeContainer(image string, stage, force, reboot bool) { } } - container.Start() - if container.Err != nil { - log.Fatal(container.Err) + if err := container.Start(); err != nil { + return err } - client, err := docker.NewClient(config.DOCKER_SYSTEM_HOST) - if err != nil { - log.Fatal(err) + if err := container.Log(); err != nil { + return err } - go func() { - client.Logs(dockerClient.LogsOptions{ - Container: container.Container.ID, - OutputStream: os.Stdout, - ErrorStream: os.Stderr, - Follow: true, - Stdout: true, - Stderr: true, - }) - }() - - exit, err := client.WaitContainer(container.Container.ID) - if err != nil { - log.Fatal(err) + if err := container.Up(); err != nil { + return err } - if container.Err != nil { - log.Fatal(container.Err) - } - - if exit == 0 { - if reboot && (force || yes(in, "Continue with reboot")) { - log.Info("Rebooting") - power.Reboot() - } - } else { - log.Error("Upgrade failed") - os.Exit(exit) + if reboot && (force || yes(in, "Continue with reboot")) { + log.Info("Rebooting") + power.Reboot() } } + + return nil } func parseBody(body []byte) (*Images, error) { diff --git a/cmd/control/reload.go b/cmd/control/reload.go deleted file mode 100644 index a34e3a6c..00000000 --- a/cmd/control/reload.go +++ /dev/null @@ -1,53 +0,0 @@ -package control - -import ( - log "github.com/Sirupsen/logrus" - - "github.com/codegangsta/cli" - "github.com/rancherio/os/config" - "github.com/rancherio/os/docker" -) - -//func parseContainers(cfg *config.Config) map[string]*docker.Container { -// result := map[string]*docker.Container{} -// -// for _, containerConfig := range cfg.SystemContainers { -// container := docker.NewContainer(config.DOCKER_SYSTEM_HOST, &containerConfig) -// if containerConfig.Id != "" { -// result[containerConfig.Id] = container -// } -// } -// -// return result -//} - -func reload(c *cli.Context) { - _, err := config.LoadConfig() - if err != nil { - log.Fatal(err) - } - - containers := map[string]*docker.Container{} //parseContainers(cfg) - toStart := make([]*docker.Container, 0, len(c.Args())) - - for _, id := range c.Args() { - if container, ok := containers[id]; ok { - toStart = append(toStart, container.Stage()) - } - } - - var firstErr error - for _, c := range toStart { - err := c.Start().Err - if err != nil { - log.Errorf("Failed to start %s : %v", c.ContainerCfg.Id, err) - if firstErr != nil { - firstErr = err - } - } - } - - if firstErr != nil { - log.Fatal(firstErr) - } -} diff --git a/cmd/control/service.go b/cmd/control/service.go index 9c46b9b7..2fade009 100644 --- a/cmd/control/service.go +++ b/cmd/control/service.go @@ -6,8 +6,8 @@ import ( "strings" "github.com/codegangsta/cli" + "github.com/rancherio/os/compose" "github.com/rancherio/os/config" - "github.com/rancherio/os/docker" "github.com/rancherio/os/util" ) @@ -93,7 +93,7 @@ func enable(c *cli.Context) { if strings.HasPrefix(service, "/") && !strings.HasPrefix(service, "/var/lib/rancher/conf") { log.Fatalf("ERROR: Service should be in path /var/lib/rancher/conf") } - if _, err := docker.LoadServiceResource(service, true, cfg); err != nil { + if _, err := compose.LoadServiceResource(service, true, cfg); err != nil { log.Fatalf("could not load service %s", service) } cfg.Rancher.ServicesInclude[service] = true 
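The compose package introduced in the next file becomes the single entry point for running one-off services, replacing the hand-rolled container management deleted later in this patch. As a rough usage sketch mirroring the osUpgrade hunk above (the service name and image below are placeholders, not values from this series):

    package main

    import (
    	log "github.com/Sirupsen/logrus"
    	"github.com/docker/libcompose/project"
    	"github.com/rancherio/os/compose"
    )

    func main() {
    	// Passing nil for the CloudConfig makes CreateService load it itself,
    	// via the config.LoadConfig() fallback in compose.CreateService below.
    	svc, err := compose.CreateService(nil, "example", &project.ServiceConfig{
    		Image:      "busybox", // placeholder image
    		Net:        "host",
    		Privileged: true,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Up() creates and starts the service's container, the same call
    	// osUpgrade chains after Pull/Start/Log above.
    	if err := svc.Up(); err != nil {
    		log.Fatal(err)
    	}
    }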
diff --git a/compose/project.go b/compose/project.go
new file mode 100644
index 00000000..08a2230f
--- /dev/null
+++ b/compose/project.go
@@ -0,0 +1,162 @@
+package compose
+
+import (
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/libcompose/cli/logger"
+	"github.com/docker/libcompose/docker"
+	"github.com/docker/libcompose/project"
+	"github.com/rancherio/os/config"
+	rosDocker "github.com/rancherio/os/docker"
+	"github.com/rancherio/os/util"
+)
+
+func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
+	if cfg == nil {
+		var err error
+		cfg, err = config.LoadConfig()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	p, err := RunServiceSet("once", cfg, map[string]*project.ServiceConfig{
+		name: serviceConfig,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return p.CreateService(name)
+}
+
+func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
+	p, err := newProject(name, cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	addServices(p, cfg, map[string]string{}, configs)
+
+	return p, p.Up()
+}
+
+func RunServices(cfg *config.CloudConfig) error {
+	p, err := newCoreServiceProject(cfg)
+	if err != nil {
+		return err
+	}
+
+	return p.Up()
+}
+
+func newProject(name string, cfg *config.CloudConfig) (*project.Project, error) {
+	clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{})
+	if err != nil {
+		return nil, err
+	}
+
+	serviceFactory := &rosDocker.ServiceFactory{
+		Deps: map[string][]string{},
+	}
+	context := &docker.Context{
+		ClientFactory: clientFactory,
+		Context: project.Context{
+			ProjectName:       name,
+			EnvironmentLookup: rosDocker.NewConfigEnvironment(cfg),
+			ServiceFactory:    serviceFactory,
+			Rebuild:           true,
+			Log:               cfg.Rancher.Log,
+			LoggerFactory:     logger.NewColorLoggerFactory(),
+		},
+	}
+	serviceFactory.Context = context
+
+	return docker.NewProject(context)
+}
+
+func addServices(p *project.Project, cfg *config.CloudConfig, enabled map[string]string, configs map[string]*project.ServiceConfig) {
+	// Note: we ignore errors while loading services
+	for name, serviceConfig := range cfg.Rancher.Services {
+		hash := project.GetServiceHash(name, *serviceConfig)
+
+		if enabled[name] == hash {
+			continue
+		}
+
+		if err := p.AddConfig(name, serviceConfig); err != nil {
+			log.Infof("Failed loading service %s", name)
+			continue
+		}
+
+		enabled[name] = hash
+	}
+}
+
+func newCoreServiceProject(cfg *config.CloudConfig) (*project.Project, error) {
+	network := false
+	projectEvents := make(chan project.ProjectEvent)
+	enabled := make(map[string]string)
+
+	p, err := newProject("os", cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	p.AddListener(project.NewDefaultListener(p))
+	p.AddListener(projectEvents)
+
+	p.ReloadCallback = func() error {
+		err := cfg.Reload()
+		if err != nil {
+			return err
+		}
+
+		for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
+			if enabled[service] != "" || !serviceEnabled {
+				continue
+			}
+
+			bytes, err := LoadServiceResource(service, network, cfg)
+			if err != nil {
+				if err == util.ErrNoNetwork {
+					log.Debugf("Cannot load %s, networking not enabled", service)
+				} else {
+					log.Errorf("Failed to load %s : %v", service, err)
+				}
+				continue
+			}
+
+			err = p.Load(bytes)
+			if err != nil {
+				log.Errorf("Failed to load %s : %v", service, err)
+				continue
+			}
+
+			enabled[service] = service
+		}
+
+		addServices(p, cfg, enabled, cfg.Rancher.Services)
+
+		return nil
+	}
+
+	go func() {
+		for event :=
range projectEvents { + if event.Event == project.CONTAINER_STARTED && event.ServiceName == "network" { + network = true + } + } + }() + + err = p.ReloadCallback() + if err != nil { + log.Errorf("Failed to reload os: %v", err) + return nil, err + } + + return p, nil +} + +func LoadServiceResource(name string, network bool, cfg *config.CloudConfig) ([]byte, error) { + return util.LoadResource(name, network, cfg.Rancher.Repositories.ToArray()) +} diff --git a/config/config.go b/config/config.go index 145de3fe..03c5cf13 100644 --- a/config/config.go +++ b/config/config.go @@ -5,8 +5,8 @@ import ( "strings" log "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/project" "github.com/rancherio/os/util" - "github.com/rancherio/rancher-compose/librcompose/project" "gopkg.in/yaml.v2" ) @@ -139,18 +139,6 @@ func Dump(private, full bool) (string, error) { return string(bytes), err } -func (c *CloudConfig) configureConsole() error { - if console, ok := c.Rancher.Services[CONSOLE_CONTAINER]; ok { - if c.Rancher.Console.Persistent { - console.Labels.MapParts()[REMOVE] = "false" - } else { - console.Labels.MapParts()[REMOVE] = "true" - } - } - - return nil -} - func (c *CloudConfig) amendNils() error { if c.Rancher.Environment == nil { c.Rancher.Environment = map[string]string{} @@ -173,7 +161,6 @@ func (c *CloudConfig) amendNils() error { func (c *CloudConfig) readGlobals() error { return util.ShortCircuit( c.readCmdline, - c.configureConsole, // TODO: this smells (it is a write hidden inside a read) ) } @@ -216,27 +203,6 @@ func (c *CloudConfig) Set(key string, value interface{}) error { return c.Reload() } -func (d *DockerConfig) BridgeConfig() (string, string) { - var name, cidr string - - args := append(d.Args, d.ExtraArgs...) - for i, opt := range args { - if opt == "-b" && i < len(args)-1 { - name = args[i+1] - } - - if opt == "--fixed-cidr" && i < len(args)-1 { - cidr = args[i+1] - } - } - - if name == "" || name == "none" { - return "", "" - } else { - return name, cidr - } -} - func (r Repositories) ToArray() []string { result := make([]string, 0, len(r)) for _, repo := range r { diff --git a/config/types.go b/config/types.go index eb08cf7b..7bc88e88 100644 --- a/config/types.go +++ b/config/types.go @@ -2,12 +2,11 @@ package config import ( "github.com/coreos/coreos-cloudinit/config" + "github.com/docker/libcompose/project" "github.com/rancher/netconf" - "github.com/rancherio/rancher-compose/librcompose/project" ) const ( - CONSOLE_CONTAINER = "console" DOCKER_BIN = "/usr/bin/docker" ROS_BIN = "/usr/bin/ros" SYSINIT_BIN = "/usr/bin/ros-sysinit" @@ -24,7 +23,6 @@ const ( HASH = "io.rancher.os.hash" ID = "io.rancher.os.id" DETACH = "io.rancher.os.detach" - REMOVE = "io.rancher.os.remove" CREATE_ONLY = "io.rancher.os.createonly" RELOAD_CONFIG = "io.rancher.os.reloadconfig" SCOPE = "io.rancher.os.scope" @@ -73,8 +71,8 @@ type RancherConfig struct { Autoformat map[string]*project.ServiceConfig `yaml:"autoformat,omitempty"` BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"` CloudInit CloudInit `yaml:"cloud_init,omitempty"` - Console ConsoleConfig `yaml:"console,omitempty"` Debug bool `yaml:"debug,omitempty"` + Log bool `yaml:"log,omitempty"` Disable []string `yaml:"disable,omitempty"` ServicesInclude map[string]bool `yaml:"services_include,omitempty"` Modules []string `yaml:"modules,omitempty"` @@ -84,15 +82,9 @@ type RancherConfig struct { State StateConfig `yaml:"state,omitempty"` SystemDocker DockerConfig `yaml:"system_docker,omitempty"` Upgrade UpgradeConfig 
`yaml:"upgrade,omitempty"` - UserContainers []ContainerConfig `yaml:"user_containers,omitempty"` UserDocker DockerConfig `yaml:"user_docker,omitempty"` } -type ConsoleConfig struct { - Tail bool `yaml:"tail,omitempty"` - Persistent bool `yaml:"persistent,omitempty"` -} - type UpgradeConfig struct { Url string `yaml:"url,omitempty"` Image string `yaml:"image,omitempty"` diff --git a/docker/client.go b/docker/client.go index 7e1750c4..0df4885b 100644 --- a/docker/client.go +++ b/docker/client.go @@ -1,10 +1,6 @@ package docker import ( - "time" - - log "github.com/Sirupsen/logrus" - dockerClient "github.com/fsouza/go-dockerclient" "github.com/rancherio/os/config" ) @@ -28,25 +24,10 @@ func NewClient(endpoint string) (*dockerClient.Client, error) { return nil, err } - retry := false - for i := 0; i < (MAX_WAIT / INTERVAL); i++ { - _, err = client.Info() - if err == nil { - break - } + err = ClientOK(endpoint, func() bool { + _, err := client.Info() + return err == nil + }) - retry = true - - log.Infof("Waiting for Docker at %s", endpoint) - time.Sleep(INTERVAL * time.Millisecond) - } - - if err != nil { - return nil, err - } - - if retry { - log.Infof("Connected to Docker at %s", endpoint) - } - return client, nil + return client, err } diff --git a/docker/client_factory.go b/docker/client_factory.go new file mode 100644 index 00000000..0c0726ea --- /dev/null +++ b/docker/client_factory.go @@ -0,0 +1,94 @@ +package docker + +import ( + "fmt" + "sync" + + "github.com/docker/libcompose/docker" + "github.com/docker/libcompose/project" + "github.com/docker/machine/log" + "github.com/rancherio/os/config" + "github.com/rancherio/os/util" + "github.com/samalba/dockerclient" +) + +type ClientFactory struct { + userClient dockerclient.Client + systemClient dockerclient.Client + userOnce sync.Once + systemOnce sync.Once +} + +func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) { + userOpts := opts + systemOpts := opts + + userOpts.Host = config.DOCKER_HOST + systemOpts.Host = config.DOCKER_SYSTEM_HOST + + userClient, err := docker.CreateClient(userOpts) + if err != nil { + return nil, err + } + + systemClient, err := docker.CreateClient(systemOpts) + if err != nil { + return nil, err + } + + return &ClientFactory{ + userClient: userClient, + systemClient: systemClient, + }, nil +} + +func (c *ClientFactory) Create(service project.Service) dockerclient.Client { + if IsSystemContainer(service.Config()) { + waitFor(&c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST) + return c.systemClient + } + + waitFor(&c.userOnce, c.userClient, config.DOCKER_HOST) + return c.userClient +} + +func waitFor(once *sync.Once, client dockerclient.Client, endpoint string) { + once.Do(func() { + err := ClientOK(endpoint, func() bool { + _, err := client.Info() + return err == nil + }) + if err != nil { + panic(err.Error()) + } + }) +} + +func ClientOK(endpoint string, test func() bool) error { + backoff := util.Backoff{} + defer backoff.Close() + + var err error + retry := false + for ok := range backoff.Start() { + if !ok { + err = fmt.Errorf("Timeout waiting for Docker at %s", endpoint) + break + } + if test() { + break + } + retry = true + log.Infof("Waiting for Docker at %s", endpoint) + } + + if err != nil { + return err + } + + if retry { + log.Infof("Connected to Docker at %s", endpoint) + } + + return nil +} diff --git a/docker/container.go b/docker/container.go deleted file mode 100644 index 3f244446..00000000 --- a/docker/container.go +++ /dev/null @@ -1,632 +0,0 @@ -package docker - 
-import ( - "crypto/sha1" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "reflect" - "sort" - "strings" - - log "github.com/Sirupsen/logrus" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/runconfig" - shlex "github.com/flynn/go-shlex" - dockerClient "github.com/fsouza/go-dockerclient" - "github.com/rancherio/os/config" - "github.com/rancherio/os/util" - "github.com/rancherio/rancher-compose/librcompose/docker" - "github.com/rancherio/rancher-compose/librcompose/project" -) - -type Container struct { - Err error - Name string - remove bool - detach bool - Config *runconfig.Config - HostConfig *runconfig.HostConfig - dockerHost string - Container *dockerClient.Container - ContainerCfg *config.ContainerConfig -} - -type ByCreated []dockerClient.APIContainers - -func (c ByCreated) Len() int { return len(c) } -func (c ByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c ByCreated) Less(i, j int) bool { return c[j].Created < c[i].Created } - -func getHash(containerCfg *config.ContainerConfig) string { - hash := sha1.New() - - io.WriteString(hash, fmt.Sprintln(containerCfg.Id)) - io.WriteString(hash, fmt.Sprintln(containerCfg.Cmd)) - io.WriteString(hash, fmt.Sprintln(containerCfg.MigrateVolumes)) - io.WriteString(hash, fmt.Sprintln(containerCfg.ReloadConfig)) - io.WriteString(hash, fmt.Sprintln(containerCfg.CreateOnly)) - - if containerCfg.Service != nil { - //Get values of Service through reflection - val := reflect.ValueOf(containerCfg.Service).Elem() - - //Create slice to sort the keys in Service Config, which allow constant hash ordering - serviceKeys := []string{} - - //Create a data structure of map of values keyed by a string - unsortedKeyValue := make(map[string]interface{}) - - //Get all keys and values in Service Configuration - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - keyField := val.Type().Field(i) - - serviceKeys = append(serviceKeys, keyField.Name) - unsortedKeyValue[keyField.Name] = valueField.Interface() - } - - //Sort serviceKeys alphabetically - sort.Strings(serviceKeys) - - //Go through keys and write hash - for _, serviceKey := range serviceKeys { - serviceValue := unsortedKeyValue[serviceKey] - - io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey)) - - switch s := serviceValue.(type) { - case project.SliceorMap: - sliceKeys := []string{} - for lkey := range s.MapParts() { - if lkey != "io.rancher.os.hash" { - sliceKeys = append(sliceKeys, lkey) - } - } - sort.Strings(sliceKeys) - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s.MapParts()[sliceKey])) - } - case project.MaporEqualSlice: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case project.MaporColonSlice: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case project.MaporSpaceSlice: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case project.Command: - sliceKeys := s.Slice() - // do not sort keys as the order matters - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case project.Stringorslice: - sliceKeys := s.Slice() - 
sort.Strings(sliceKeys) - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case []string: - sliceKeys := s - sort.Strings(sliceKeys) - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - default: - io.WriteString(hash, fmt.Sprintf("%v", serviceValue)) - } - } - } - - return hex.EncodeToString(hash.Sum(nil)) -} - -func StartAndWait(dockerHost string, containerCfg *config.ContainerConfig) error { - container := NewContainer(dockerHost, containerCfg).start(false, true) - return container.Err -} - -func NewContainerFromService(dockerHost string, name string, service *project.ServiceConfig) *Container { - c := &Container{ - Name: name, - dockerHost: dockerHost, - ContainerCfg: &config.ContainerConfig{ - Id: name, - Service: service, - }, - } - - return c.Parse() -} - -func NewContainer(dockerHost string, containerCfg *config.ContainerConfig) *Container { - c := &Container{ - dockerHost: dockerHost, - ContainerCfg: containerCfg, - } - return c.Parse() -} - -func (c *Container) returnErr(err error) *Container { - c.Err = err - return c -} - -func getByLabel(client *dockerClient.Client, key, value string) (*dockerClient.APIContainers, error) { - containers, err := client.ListContainers(dockerClient.ListContainersOptions{ - All: true, - Filters: map[string][]string{ - config.LABEL: {fmt.Sprintf("%s=%s", key, value)}, - }, - }) - - if err != nil { - return nil, err - } - - if len(containers) == 0 { - return nil, nil - } - - sort.Sort(ByCreated(containers)) - return &containers[0], nil -} - -func (c *Container) Lookup() *Container { - c.Parse() - - if c.Err != nil || (c.Container != nil && c.Container.HostConfig != nil) { - return c - } - - hash := getHash(c.ContainerCfg) - - client, err := NewClient(c.dockerHost) - if err != nil { - return c.returnErr(err) - } - - containers, err := client.ListContainers(dockerClient.ListContainersOptions{ - All: true, - Filters: map[string][]string{ - config.LABEL: {fmt.Sprintf("%s=%s", config.HASH, hash)}, - }, - }) - if err != nil { - return c.returnErr(err) - } - - if len(containers) == 0 { - return c - } - - c.Container, c.Err = inspect(client, containers[0].ID) - - return c -} - -func inspect(client *dockerClient.Client, id string) (*dockerClient.Container, error) { - c, err := client.InspectContainer(id) - if err != nil { - return nil, err - } - - if strings.HasPrefix(c.Name, "/") { - c.Name = c.Name[1:] - } - - return c, err -} - -func (c *Container) Exists() bool { - c.Lookup() - return c.Container != nil -} - -func (c *Container) Reset() *Container { - c.Config = nil - c.HostConfig = nil - c.Container = nil - c.Err = nil - - return c -} - -func (c *Container) requiresSyslog() bool { - return (c.ContainerCfg.Service.LogDriver == "" || c.ContainerCfg.Service.LogDriver == "syslog") -} - -func (c *Container) requiresUserDocker() bool { - if c.dockerHost == config.DOCKER_HOST { - return true - } - - return false -} - -func (c *Container) hasLink(link string) bool { - return util.Contains(c.ContainerCfg.Service.Links.Slice(), link) -} - -func (c *Container) addLink(link string) { - if c.hasLink(link) { - return - } - - log.Debugf("Adding %s link to %s", link, c.Name) - c.ContainerCfg.Service.Links = project.NewMaporColonSlice(append(c.ContainerCfg.Service.Links.Slice(), link)) -} - -func (c *Container) parseService() { - if c.requiresSyslog() { - c.addLink("syslog") - } - - if c.requiresUserDocker() { - c.addLink("dockerwait") - } else if c.ContainerCfg.Service.Image 
!= "" { - client, err := NewClient(c.dockerHost) - if err != nil { - c.Err = err - return - } - - i, _ := client.InspectImage(c.ContainerCfg.Service.Image) - if i == nil { - c.addLink("network") - } - } - - cfg, hostConfig, err := docker.Convert(c.ContainerCfg.Service) - if err != nil { - c.Err = err - return - } - - c.Config = cfg - c.HostConfig = hostConfig - - c.detach = c.Config.Labels[config.DETACH] != "false" - c.remove = c.Config.Labels[config.REMOVE] != "false" - c.ContainerCfg.CreateOnly = c.Config.Labels[config.CREATE_ONLY] == "true" - c.ContainerCfg.ReloadConfig = c.Config.Labels[config.RELOAD_CONFIG] == "true" -} - -func (c *Container) parseCmd() { - flags := flag.NewFlagSet("run", flag.ExitOnError) - - flRemove := flags.Bool([]string{"#rm", "-rm"}, false, "") - flDetach := flags.Bool([]string{"d", "-detach"}, false, "") - flName := flags.String([]string{"#name", "-name"}, "", "") - - args, err := shlex.Split(c.ContainerCfg.Cmd) - if err != nil { - c.Err = err - return - } - - log.Debugf("Parsing [%s]", strings.Join(args, ",")) - c.Config, c.HostConfig, _, c.Err = runconfig.Parse(flags, args) - - c.Name = *flName - c.detach = *flDetach - c.remove = *flRemove -} - -func (c *Container) Parse() *Container { - if c.Config != nil || c.Err != nil { - return c - } - - if len(c.ContainerCfg.Cmd) > 0 { - c.parseCmd() - } else if c.ContainerCfg.Service != nil { - c.parseService() - } else { - c.Err = errors.New("Cmd or Service must be set") - return c - } - - if c.ContainerCfg.Id == "" { - c.ContainerCfg.Id = c.Name - } - - return c -} - -func (c *Container) Create() *Container { - return c.start(true, false) -} - -func (c *Container) Start() *Container { - return c.start(false, false) -} - -func (c *Container) StartAndWait() *Container { - return c.start(false, true) -} - -func (c *Container) Stage() *Container { - c.Parse() - - if c.Err != nil { - return c - } - - client, err := NewClient(c.dockerHost) - if err != nil { - c.Err = err - return c - } - - _, err = client.InspectImage(c.Config.Image) - if err == dockerClient.ErrNoSuchImage { - toPull := c.Config.Image - _, tag := parsers.ParseRepositoryTag(toPull) - if tag == "" { - toPull += ":latest" - } - c.Err = client.PullImage(dockerClient.PullImageOptions{ - Repository: toPull, - OutputStream: os.Stdout, - }, dockerClient.AuthConfiguration{}) - } else if err != nil { - log.Errorf("Failed to stage: %s: %v", c.Config.Image, err) - c.Err = err - } - - return c -} - -func (c *Container) Delete() *Container { - c.Parse() - c.Stage() - c.Lookup() - - if c.Err != nil { - return c - } - - if !c.Exists() { - return c - } - - client, err := NewClient(c.dockerHost) - if err != nil { - return c.returnErr(err) - } - - err = client.RemoveContainer(dockerClient.RemoveContainerOptions{ - ID: c.Container.ID, - Force: true, - }) - if err != nil { - return c.returnErr(err) - } - - return c -} - -func (c *Container) renameCurrent(client *dockerClient.Client) error { - if c.Name == "" { - return nil - } - - if c.Name == c.Container.Name { - return nil - } - - err := client.RenameContainer(dockerClient.RenameContainerOptions{ID: c.Container.ID, Name: c.Name}) - if err != nil { - return err - } - - c.Container, err = inspect(client, c.Container.ID) - return err -} - -func (c *Container) renameOld(client *dockerClient.Client, opts *dockerClient.CreateContainerOptions) error { - if len(opts.Name) == 0 { - return nil - } - - existing, err := inspect(client, opts.Name) - if _, ok := err.(*dockerClient.NoSuchContainer); ok { - return nil - } - - if err != nil 
{ - return nil - } - - if c.Container != nil && existing.ID == c.Container.ID { - return nil - } - - var newName string - if label, ok := existing.Config.Labels[config.HASH]; ok { - newName = fmt.Sprintf("%s-%s", existing.Name, label) - } else { - newName = fmt.Sprintf("%s-unknown-%s", existing.Name, util.RandSeq(12)) - } - - if existing.State.Running { - err := client.StopContainer(existing.ID, 2) - if err != nil { - return err - } - - _, err = client.WaitContainer(existing.ID) - if err != nil { - return err - } - } - - log.Debugf("Renaming %s to %s", existing.Name, newName) - return client.RenameContainer(dockerClient.RenameContainerOptions{ID: existing.ID, Name: newName}) -} - -func (c *Container) getCreateOpts(client *dockerClient.Client) (*dockerClient.CreateContainerOptions, error) { - bytes, err := json.Marshal(c) - if err != nil { - log.Errorf("Failed to marshall: %v", c) - return nil, err - } - - var opts dockerClient.CreateContainerOptions - - err = json.Unmarshal(bytes, &opts) - if err != nil { - log.Errorf("Failed to unmarshall: %s", string(bytes)) - return nil, err - } - - if opts.Config.Labels == nil { - opts.Config.Labels = make(map[string]string) - } - - hash := getHash(c.ContainerCfg) - - opts.Config.Labels[config.HASH] = hash - opts.Config.Labels[config.ID] = c.ContainerCfg.Id - - return &opts, nil -} - -func appendVolumesFrom(client *dockerClient.Client, containerCfg *config.ContainerConfig, opts *dockerClient.CreateContainerOptions) error { - if !containerCfg.MigrateVolumes { - return nil - } - - container, err := getByLabel(client, config.ID, containerCfg.Id) - if err != nil || container == nil { - return err - } - - if opts.HostConfig.VolumesFrom == nil { - opts.HostConfig.VolumesFrom = []string{container.ID} - } else { - opts.HostConfig.VolumesFrom = append(opts.HostConfig.VolumesFrom, container.ID) - } - - return nil -} - -func (c *Container) start(createOnly, wait bool) *Container { - log.Debugf("Container: STARTING '%v', createOnly: %v, !detach: %v, wait: %v", c.Name, createOnly, !c.detach, wait) - c.Lookup() - c.Stage() - - if c.Err != nil { - return c - } - - client, err := NewClient(c.dockerHost) - if err != nil { - return c.returnErr(err) - } - - created := false - - opts, err := c.getCreateOpts(client) - if err != nil { - log.Errorf("Failed to create container create options: %v", err) - return c.returnErr(err) - } - - if c.Exists() && c.remove { - log.Debugf("Deleting container %s", c.Container.ID) - c.Delete() - - if c.Err != nil { - return c - } - - c.Reset().Lookup() - if c.Err != nil { - return c - } - } - - if !c.Exists() { - err = c.renameOld(client, opts) - if err != nil { - return c.returnErr(err) - } - - err := appendVolumesFrom(client, c.ContainerCfg, opts) - if err != nil { - return c.returnErr(err) - } - - c.Container, err = client.CreateContainer(*opts) - created = true - if err != nil { - return c.returnErr(err) - } - } - - hostConfig := c.Container.HostConfig - if created { - hostConfig = opts.HostConfig - } - - if createOnly { - return c - } - - if !c.Container.State.Running { - if !created { - err = c.renameOld(client, opts) - if err != nil { - return c.returnErr(err) - } - } - - err = c.renameCurrent(client) - if err != nil { - return c.returnErr(err) - } - - err = client.StartContainer(c.Container.ID, hostConfig) - if err != nil { - log.Errorf("Error from Docker %s", err) - return c.returnErr(err) - } - } - - log.Debugf("Container: WAIT? 
'%v' !c.detach && wait: %v", c.Name, !c.detach && wait) - if !c.detach && wait { - var exitCode int - exitCode, c.Err = client.WaitContainer(c.Container.ID) - log.Debugf("Container: FINISHED '%v', exitCode: %v", c.Name, exitCode) - if exitCode != 0 { - c.Err = errors.New(fmt.Sprintf("Container %s exited with code %d", c.Name, exitCode)) - } - return c - } - - return c -} diff --git a/docker/container_test.go b/docker/container_test.go deleted file mode 100644 index 06c613f6..00000000 --- a/docker/container_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package docker - -import ( - "fmt" - "strings" - "testing" - - "github.com/rancherio/os/config" - "github.com/rancherio/rancher-compose/librcompose/project" - "github.com/stretchr/testify/require" - - dockerClient "github.com/fsouza/go-dockerclient" - "os" -) - -func testDockerHost(t *testing.T) { - assert := require.New(t) - assert.Equal(os.Getenv("DOCKER_HOST"), config.DOCKER_HOST) -} - -func TestHash(t *testing.T) { - assert := require.New(t) - - hash := getHash(&config.ContainerConfig{ - Id: "id", - Cmd: "1 2 3", - }) - - hash2 := getHash(&config.ContainerConfig{ - Id: "id2", - Cmd: "1 2 3", - }) - - hash3 := getHash(&config.ContainerConfig{ - Id: "id3", - Cmd: "1 2 3 4", - }) - - assert.Equal("d601444333c7fb4cb955bcca36c5ed59b6fa8c3f", hash, "") - assert.NotEqual(hash, hash2, "") - assert.NotEqual(hash2, hash3, "") - assert.NotEqual(hash, hash3, "") -} - -func TestHash2(t *testing.T) { - assert := require.New(t) - - cfg := &config.ContainerConfig{ - Id: "docker-volumes", - Cmd: "", - MigrateVolumes: false, - ReloadConfig: false, - CreateOnly: true, - Service: &project.ServiceConfig{ - CapAdd: nil, - CapDrop: nil, - CpuShares: 0, - Command: project.NewCommand(), - Detach: "", - Dns: project.NewStringorslice(), - DnsSearch: project.NewStringorslice(), - DomainName: "", - Entrypoint: project.NewCommand(), - EnvFile: project.NewStringorslice(), - Environment: project.NewMaporEqualSlice([]string{}), - Hostname: "", - Image: "state", - Labels: project.NewSliceorMap(map[string]string{ - "io.rancher.os.createonly": "true", - "io.rancher.os.scope": "system"}), - Links: project.NewMaporColonSlice(nil), - LogDriver: "json-file", - MemLimit: 0, - Name: "", - Net: "none", - Pid: "", - Ipc: "", - Ports: nil, - Privileged: true, - Restart: "", - ReadOnly: true, - StdinOpen: false, - Tty: false, - User: "", - Volumes: []string{ - "/var/lib/docker:/var/lib/docker", - "/var/lib/rancher/conf:/var/lib/rancher/conf", - "/var/lib/system-docker:/var/lib/system-docker"}, - VolumesFrom: nil, - WorkingDir: "", - Expose: nil, - ExternalLinks: nil}, - } - - for i := 0; i < 1000; i++ { - assert.Equal(getHash(cfg), getHash(cfg), fmt.Sprintf("Failed at iteration: %v", i)) - } -} - -func TestBool2String(t *testing.T) { - assert := require.New(t) - assert.Equal("true", fmt.Sprint(true), "") -} - -func TestParse(t *testing.T) { - assert := require.New(t) - - cfg := &config.ContainerConfig{ - Cmd: "--name c1 " + - "-d " + - "--rm " + - "--privileged " + - "test/image " + - "arg1 " + - "arg2 ", - } - - c := NewContainer("", cfg).Parse() - - assert.NoError(c.Err, "") - assert.Equal(cfg.Id, "c1", "Id doesn't match") - assert.Equal(c.Name, "c1", "Name doesn't match") - assert.True(c.remove, "Remove doesn't match") - assert.True(c.detach, "Detach doesn't match") - assert.Equal(c.Config.Cmd.Len(), 2, "Args doesn't match") - assert.Equal(c.Config.Cmd.Slice()[0], "arg1", "Arg1 doesn't match") - assert.Equal(c.Config.Cmd.Slice()[1], "arg2", "Arg2 doesn't match") - 
assert.True(c.HostConfig.Privileged, "Privileged doesn't match") -} - -func TestIdFromName(t *testing.T) { - assert := require.New(t) - - cfg := &config.ContainerConfig{ - Cmd: "--name foo -v /test busybox echo hi", - } - - assert.Equal("", cfg.Id) - NewContainer(config.DOCKER_HOST, cfg) - assert.Equal("foo", cfg.Id) -} - -func testMigrateVolumes(t *testing.T) { - assert := require.New(t) - - c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - Cmd: "--name foo -v /test busybox echo hi", - }).Parse().Start().Lookup() - - assert.NoError(c.Err, "") - - test_path, ok := c.Container.Volumes["/test"] - assert.True(ok, "") - - c2 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - MigrateVolumes: true, - Cmd: "--name foo -v /test2 busybox echo hi", - }).Parse().Start().Lookup() - - assert.NoError(c2.Err, "") - - assert.True(c2.Container != nil) - - _, ok = c2.Container.Volumes["/test2"] - assert.True(ok, "") - assert.Equal(test_path, c2.Container.Volumes["/test"]) - - c.Delete() - c2.Delete() -} - -func testRollback(t *testing.T) { - assert := require.New(t) - - c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - Cmd: "--name rollback busybox echo hi", - }).Parse().Start().Lookup() - - assert.NoError(c.Err, "") - assert.Equal("rollback", c.Container.Name) - - c2 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - Cmd: "--name rollback busybox echo bye", - }).Parse().Start().Lookup() - - assert.Equal("rollback", c2.Container.Name) - assert.NoError(c2.Err, "") - assert.NotEqual(c.Container.ID, c2.Container.ID) - - c3 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - Cmd: "--name rollback busybox echo hi", - }).Parse().Start().Lookup() - - assert.NoError(c3.Err, "") - assert.Equal(c.Container.ID, c3.Container.ID) - assert.Equal("rollback", c3.Container.Name) - - c2.Reset().Lookup() - assert.NoError(c2.Err, "") - assert.True(strings.HasPrefix(c2.Container.Name, "rollback-")) - - c.Delete() - c2.Delete() -} - -func testStart(t *testing.T) { - assert := require.New(t) - - c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - Cmd: "--pid=host --privileged --rm busybox echo hi", - }).Parse().Start().Lookup() - - assert.NoError(c.Err, "") - - assert.True(c.HostConfig.Privileged, "") - assert.True(c.Container.HostConfig.Privileged, "") - assert.Equal("host", c.Container.HostConfig.PidMode, "") - - c.Delete() -} - -func testLookup(t *testing.T) { - assert := require.New(t) - - cfg := &config.ContainerConfig{ - Cmd: "--rm busybox echo hi", - } - c := NewContainer(config.DOCKER_HOST, cfg).Parse().Start() - - cfg2 := &config.ContainerConfig{ - Cmd: "--rm busybox echo hi2", - } - c2 := NewContainer(config.DOCKER_HOST, cfg2).Parse().Start() - - assert.NoError(c.Err, "") - assert.NoError(c2.Err, "") - - c1Lookup := NewContainer(config.DOCKER_HOST, cfg).Lookup() - c2Lookup := NewContainer(config.DOCKER_HOST, cfg2).Lookup() - - assert.NoError(c1Lookup.Err, "") - assert.NoError(c2Lookup.Err, "") - - assert.Equal(c.Container.ID, c1Lookup.Container.ID, "") - assert.Equal(c2.Container.ID, c2Lookup.Container.ID, "") - - c.Delete() - c2.Delete() -} - -func testDelete(t *testing.T) { - assert := require.New(t) - - c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{ - Cmd: "--rm busybox echo hi", - }).Parse() - - assert.False(c.Exists()) - assert.NoError(c.Err, "") - - c.Start() - assert.NoError(c.Err, "") - c.Reset() - assert.NoError(c.Err, "") - - assert.True(c.Exists()) - assert.NoError(c.Err, "") - - c.Delete() - assert.NoError(c.Err, 
"") - - c.Reset() - assert.False(c.Exists()) - assert.NoError(c.Err, "") -} - -func testDockerClientNames(t *testing.T) { - assert := require.New(t) - client, err := dockerClient.NewClient(config.DOCKER_HOST) - - assert.NoError(err, "") - - c, err := client.CreateContainer(dockerClient.CreateContainerOptions{ - Name: "foo", - Config: &dockerClient.Config{ - Image: "ubuntu", - }, - }) - - assert.NoError(err, "") - assert.Equal("foo", c.Name) - - c2, err := client.InspectContainer(c.ID) - - assert.NoError(err, "") - assert.Equal("/foo", c2.Name) - - c2, err = inspect(client, c.ID) - - assert.NoError(err, "") - assert.Equal("foo", c2.Name) - - client.RemoveContainer(dockerClient.RemoveContainerOptions{ - ID: c2.ID, - Force: true, - }) -} diff --git a/docker/env.go b/docker/env.go new file mode 100644 index 00000000..ac60546e --- /dev/null +++ b/docker/env.go @@ -0,0 +1,55 @@ +package docker + +import ( + "fmt" + "strings" + + "github.com/docker/libcompose/project" + "github.com/rancherio/os/config" +) + +type ConfigEnvironment struct { + cfg *config.CloudConfig +} + +func NewConfigEnvironment(cfg *config.CloudConfig) *ConfigEnvironment { + return &ConfigEnvironment{ + cfg: cfg, + } +} + +func appendEnv(array []string, key, value string) []string { + parts := strings.SplitN(key, "/", 2) + if len(parts) == 2 { + key = parts[1] + } + + return append(array, fmt.Sprintf("%s=%s", key, value)) +} + +func lookupKeys(cfg *config.CloudConfig, keys ...string) []string { + for _, key := range keys { + if strings.HasSuffix(key, "*") { + result := []string{} + for envKey, envValue := range cfg.Rancher.Environment { + keyPrefix := key[:len(key)-1] + if strings.HasPrefix(envKey, keyPrefix) { + result = appendEnv(result, envKey, envValue) + } + } + + if len(result) > 0 { + return result + } + } else if value, ok := cfg.Rancher.Environment[key]; ok { + return appendEnv([]string{}, key, value) + } + } + + return []string{} +} + +func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string { + fullKey := fmt.Sprintf("%s/%s", serviceName, key) + return lookupKeys(c.cfg, fullKey, key) +} diff --git a/docker/factory.go b/docker/factory.go deleted file mode 100644 index 5d394fd0..00000000 --- a/docker/factory.go +++ /dev/null @@ -1,103 +0,0 @@ -package docker - -import ( - log "github.com/Sirupsen/logrus" - - "github.com/rancherio/os/config" - "github.com/rancherio/os/util" - "github.com/rancherio/rancher-compose/librcompose/project" -) - -type ContainerFactory struct { - cfg *config.CloudConfig -} - -type containerBasedService struct { - project.EmptyService - name string - project *project.Project - container *Container - serviceConfig *project.ServiceConfig - cfg *config.CloudConfig -} - -func NewContainerFactory(cfg *config.CloudConfig) *ContainerFactory { - return &ContainerFactory{ - cfg: cfg, - } -} - -func (c *containerBasedService) Up() error { - container := c.container - containerCfg := c.container.ContainerCfg - - fakeCreate := false - create := containerCfg.CreateOnly - - if util.Contains(c.cfg.Rancher.Disable, c.name) { - fakeCreate = true - } - - var event project.Event - - c.project.Notify(project.CONTAINER_STARTING, c.name, map[string]string{}) - - if fakeCreate { - event = project.CONTAINER_CREATED - } else if create { - container.Create() - event = project.CONTAINER_CREATED - } else { - container.StartAndWait() - event = project.CONTAINER_STARTED - } - - if container.Err != nil { - log.Errorf("Failed to run %v: %v", containerCfg.Id, container.Err) - } 
- - if container.Err == nil && containerCfg.ReloadConfig { - return project.ErrRestart - } - - if container.Container != nil { - c.project.Notify(event, c.name, map[string]string{ - project.CONTAINER_ID: container.Container.ID, - }) - } - - return container.Err -} - -func (c *containerBasedService) Config() *project.ServiceConfig { - return c.serviceConfig -} - -func (c *containerBasedService) Name() string { - return c.name -} - -func isSystemService(serviceConfig *project.ServiceConfig) bool { - return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM -} - -func (c *ContainerFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) { - host := config.DOCKER_HOST - if isSystemService(serviceConfig) { - host = config.DOCKER_SYSTEM_HOST - } - - container := NewContainerFromService(host, name, serviceConfig) - - if container.Err != nil { - return nil, container.Err - } - - return &containerBasedService{ - name: name, - project: project, - container: container, - serviceConfig: serviceConfig, - cfg: c.cfg, - }, nil -} diff --git a/docker/service.go b/docker/service.go new file mode 100644 index 00000000..62cb8204 --- /dev/null +++ b/docker/service.go @@ -0,0 +1,159 @@ +package docker + +import ( + "fmt" + + "github.com/docker/libcompose/docker" + "github.com/docker/libcompose/project" + "github.com/docker/machine/log" + "github.com/rancherio/os/config" + "github.com/samalba/dockerclient" +) + +type Service struct { + *docker.Service + deps map[string][]string + context *docker.Context +} + +func NewService(factory *ServiceFactory, name string, serviceConfig *project.ServiceConfig, context *docker.Context) *Service { + return &Service{ + Service: docker.NewService(name, serviceConfig, context), + deps: factory.Deps, + context: context, + } +} + +func (s *Service) DependentServices() []project.ServiceRelationship { + rels := s.Service.DependentServices() + for _, dep := range s.deps[s.Name()] { + rels = appendLink(rels, dep, true) + } + + if s.requiresSyslog() { + rels = appendLink(rels, "syslog", false) + } + + if s.requiresUserDocker() { + // Linking to cloud-init is a hack really. 
The problem is we need to link to something + // that will trigger a reload + rels = appendLink(rels, "cloud-init", false) + } else if s.missingImage() { + rels = appendLink(rels, "network", false) + } + return rels +} + +func (s *Service) missingImage() bool { + image := s.Config().Image + if image == "" { + return false + } + client := s.context.ClientFactory.Create(s) + i, err := client.InspectImage(s.Config().Image) + return err != nil || i == nil +} + +func (s *Service) requiresSyslog() bool { + return s.Config().LogDriver == "syslog" +} + +func (s *Service) requiresUserDocker() bool { + return s.Config().Labels.MapParts()[config.SCOPE] != config.SYSTEM +} + +func appendLink(deps []project.ServiceRelationship, name string, optional bool) []project.ServiceRelationship { + rel := project.NewServiceRelationship(name, project.REL_TYPE_LINK) + rel.Optional = optional + return append(deps, rel) +} + +func (s *Service) Up() error { + labels := s.Config().Labels.MapParts() + + if err := s.Service.Create(); err != nil { + return err + } + if err := s.rename(); err != nil { + return err + } + if labels[config.CREATE_ONLY] == "true" { + return s.checkReload(labels) + } + if err := s.Service.Up(); err != nil { + return err + } + if labels[config.DETACH] == "false" { + if err := s.wait(); err != nil { + return err + } + } + + return s.checkReload(labels) +} + +func (s *Service) checkReload(labels map[string]string) error { + if labels[config.RELOAD_CONFIG] == "true" { + return project.ErrRestart + } + return nil +} + +func (s *Service) Create() error { + if err := s.Service.Create(); err != nil { + return err + } + return s.rename() +} + +func (s *Service) getContainer() (dockerclient.Client, *dockerclient.ContainerInfo, error) { + containers, err := s.Service.Containers() + if err != nil { + return nil, nil, err + } + + if len(containers) == 0 { + return nil, nil, nil + } + + id, err := containers[0].Id() + if err != nil { + return nil, nil, err + } + + client := s.context.ClientFactory.Create(s) + info, err := client.InspectContainer(id) + return client, info, err +} + +func (s *Service) wait() error { + client, info, err := s.getContainer() + if err != nil || info == nil { + return err + } + + status := <-client.Wait(info.Id) + if status.Error != nil { + return status.Error + } + + if status.ExitCode == 0 { + return nil + } else { + return fmt.Errorf("ExitCode %d", status.ExitCode) + } +} + +func (s *Service) rename() error { + client, info, err := s.getContainer() + if err != nil || info == nil { + return err + } + + if len(info.Name) > 0 && info.Name[1:] != s.Name() { + log.Debugf("Renaming container %s => %s", info.Name[1:], s.Name()) + return client.RenameContainer(info.Name[1:], s.Name()) + } else { + return nil + } +} diff --git a/docker/service_factory.go b/docker/service_factory.go new file mode 100644 index 00000000..cedcef0c --- /dev/null +++ b/docker/service_factory.go @@ -0,0 +1,27 @@ +package docker + +import ( + "github.com/docker/libcompose/docker" + "github.com/docker/libcompose/project" + "github.com/rancherio/os/util" +) + +type ServiceFactory struct { + Context *docker.Context + Deps map[string][]string +} + +func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) { + if after := serviceConfig.Labels.MapParts()["io.rancher.os.after"]; after != "" { + for _, dep := range util.TrimSplit(after, ",") { + s.Deps[name] = append(s.Deps[name], dep) + } + } + if before := 
serviceConfig.Labels.MapParts()["io.rancher.os.before"]; before != "" { + for _, dep := range util.TrimSplit(before, ",") { + s.Deps[dep] = append(s.Deps[dep], name) + } + } + + return NewService(s, name, serviceConfig, s.Context), nil +} diff --git a/docker/services.go b/docker/services.go deleted file mode 100644 index babeebac..00000000 --- a/docker/services.go +++ /dev/null @@ -1,137 +0,0 @@ -package docker - -import ( - "fmt" - "strings" - - log "github.com/Sirupsen/logrus" - "github.com/rancherio/os/config" - "github.com/rancherio/os/util" - "github.com/rancherio/rancher-compose/librcompose/project" -) - -type configEnvironment struct { - cfg *config.CloudConfig -} - -func appendEnv(array []string, key, value string) []string { - parts := strings.SplitN(key, "/", 2) - if len(parts) == 2 { - key = parts[1] - } - - return append(array, fmt.Sprintf("%s=%s", key, value)) -} - -func lookupKeys(cfg *config.CloudConfig, keys ...string) []string { - for _, key := range keys { - if strings.HasSuffix(key, "*") { - result := []string{} - for envKey, envValue := range cfg.Rancher.Environment { - keyPrefix := key[:len(key)-1] - if strings.HasPrefix(envKey, keyPrefix) { - result = appendEnv(result, envKey, envValue) - } - } - - if len(result) > 0 { - return result - } - } else if value, ok := cfg.Rancher.Environment[key]; ok { - return appendEnv([]string{}, key, value) - } - } - - return []string{} -} - -func (c *configEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string { - fullKey := fmt.Sprintf("%s/%s", serviceName, key) - return lookupKeys(c.cfg, fullKey, key) -} - -func RunServices(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) error { - network := false - projectEvents := make(chan project.ProjectEvent) - p := project.NewProject(name, NewContainerFactory(cfg)) - p.EnvironmentLookup = &configEnvironment{cfg: cfg} - p.AddListener(projectEvents) - enabled := make(map[string]bool) - - for name, serviceConfig := range configs { - if err := p.AddConfig(name, serviceConfig); err != nil { - log.Infof("Failed loading service %s", name) - continue - } - enabled[name] = true - } - - p.ReloadCallback = func() error { - if p.Name != "system-init" { - return nil - } - - if err := cfg.Reload(); err != nil { - return err - } - - for service, serviceEnabled := range cfg.Rancher.ServicesInclude { - if !serviceEnabled { - continue - } - - if en, ok := enabled[service]; ok && en { - continue - } - - bytes, err := LoadServiceResource(service, network, cfg) - if err != nil { - if err == util.ErrNoNetwork { - log.Debugf("Can not load %s, networking not enabled", service) - } else { - log.Errorf("Failed to load %s : %v", service, err) - } - continue - } - - if err := p.Load(bytes); err != nil { - log.Errorf("Failed to load %s : %v", service, err) - continue - } - - enabled[service] = true - } - - for service, config := range cfg.Rancher.Services { - if en, ok := enabled[service]; ok && en { - continue - } - - if err := p.AddConfig(service, config); err != nil { - log.Errorf("Failed to load %s : %v", service, err) - continue - } - enabled[service] = true - } - - return nil - } - - go func() { - for event := range projectEvents { - if event.Event == project.CONTAINER_STARTED && event.ServiceName == "network" { - network = true - } - } - }() - - if err := p.ReloadCallback(); err != nil { - log.Errorf("Failed to reload %s : %v", name, err) - return err - } - return p.Up() -} - -func LoadServiceResource(name string, network bool, cfg 
*config.CloudConfig) ([]byte, error) { - return util.LoadResource(name, network, cfg.Rancher.Repositories.ToArray()) -} diff --git a/docker/util.go b/docker/util.go new file mode 100644 index 00000000..d2079c72 --- /dev/null +++ b/docker/util.go @@ -0,0 +1,11 @@ +package docker + +import ( + "github.com/docker/libcompose/project" + "github.com/rancherio/os/config" +) + +func IsSystemContainer(serviceConfig *project.ServiceConfig) bool { + return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM + +} diff --git a/init/bootstrap.go b/init/bootstrap.go index 1ed20b73..3a8288b4 100644 --- a/init/bootstrap.go +++ b/init/bootstrap.go @@ -8,11 +8,11 @@ import ( "strings" log "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/project" "github.com/rancher/docker-from-scratch" + "github.com/rancherio/os/compose" "github.com/rancherio/os/config" - "github.com/rancherio/os/docker" "github.com/rancherio/os/util" - "github.com/rancherio/rancher-compose/librcompose/project" ) func autoformat(cfg *config.CloudConfig) error { @@ -23,16 +23,17 @@ func autoformat(cfg *config.CloudConfig) error { FORMATZERO := "FORMATZERO=" + fmt.Sprint(cfg.Rancher.State.FormatZero) cfg.Rancher.Autoformat["autoformat"].Environment = project.NewMaporEqualSlice([]string{AUTOFORMAT, FORMATZERO}) log.Info("Running Autoformat services") - err := docker.RunServices("autoformat", cfg, cfg.Rancher.Autoformat) + _, err := compose.RunServiceSet("autoformat", cfg, cfg.Rancher.Autoformat) return err } func runBootstrapContainers(cfg *config.CloudConfig) error { log.Info("Running Bootstrap services") - return docker.RunServices("bootstrap", cfg, cfg.Rancher.BootstrapContainers) + _, err := compose.RunServiceSet("bootstrap", cfg, cfg.Rancher.BootstrapContainers) + return err } -func startDocker(cfg *config.Config) (chan interface{}, error) { +func startDocker(cfg *config.CloudConfig) (chan interface{}, error) { launchConfig, args := getLaunchConfig(cfg, &cfg.Rancher.BootstrapDocker) launchConfig.Fork = true diff --git a/init/init.go b/init/init.go index deb28a70..43bc801a 100644 --- a/init/init.go +++ b/init/init.go @@ -30,7 +30,7 @@ var ( } ) -func loadModules(cfg *config.Config) error { +func loadModules(cfg *config.CloudConfig) error { mounted := map[string]bool{} f, err := os.Open("/proc/modules") @@ -44,7 +44,7 @@ func loadModules(cfg *config.Config) error { mounted[strings.SplitN(reader.Text(), " ", 2)[0]] = true } - for _, module := range cfg.Modules { + for _, module := range cfg.Rancher.Modules { if mounted[module] { continue } @@ -58,7 +58,7 @@ func loadModules(cfg *config.Config) error { return nil } -func sysInit(cfg *config.Config) error { +func sysInit(cfg *config.CloudConfig) error { args := append([]string{config.SYSINIT_BIN}, os.Args[1:]...) 
cmd := &exec.Cmd{ @@ -83,18 +83,18 @@ func MainInit() { } } -func mountState(cfg *config.Config) error { +func mountState(cfg *config.CloudConfig) error { var err error - if cfg.State.Dev == "" { + if cfg.Rancher.State.Dev == "" { return nil } - dev := util.ResolveDevice(cfg.State.Dev) + dev := util.ResolveDevice(cfg.Rancher.State.Dev) if dev == "" { - return fmt.Errorf("Could not resolve device %q", cfg.State.Dev) + return fmt.Errorf("Could not resolve device %q", cfg.Rancher.State.Dev) } - fsType := cfg.State.FsType + fsType := cfg.Rancher.State.FsType if fsType == "auto" { fsType, err = util.GetFsType(dev) } @@ -108,7 +108,7 @@ func mountState(cfg *config.Config) error { return util.Mount(dev, STATE, fsType, "") } -func tryMountState(cfg *config.Config) error { +func tryMountState(cfg *config.CloudConfig) error { if mountState(cfg) == nil { return nil } @@ -121,8 +121,8 @@ func tryMountState(cfg *config.Config) error { return mountState(cfg) } -func tryMountAndBootstrap(cfg *config.Config) error { - if err := tryMountState(cfg); !cfg.State.Required && err != nil { +func tryMountAndBootstrap(cfg *config.CloudConfig) error { + if err := tryMountState(cfg); !cfg.Rancher.State.Required && err != nil { return nil } else if err != nil { return err @@ -132,15 +132,15 @@ func tryMountAndBootstrap(cfg *config.Config) error { return switchRoot(STATE) } -func getLaunchConfig(cfg *config.Config, dockerCfg *config.DockerConfig) (*dockerlaunch.Config, []string) { +func getLaunchConfig(cfg *config.CloudConfig, dockerCfg *config.DockerConfig) (*dockerlaunch.Config, []string) { var launchConfig dockerlaunch.Config args := dockerlaunch.ParseConfig(&launchConfig, append(dockerCfg.Args, dockerCfg.ExtraArgs...)...) - launchConfig.DnsConfig.Nameservers = cfg.Network.Dns.Nameservers - launchConfig.DnsConfig.Search = cfg.Network.Dns.Search + launchConfig.DnsConfig.Nameservers = cfg.Rancher.Network.Dns.Nameservers + launchConfig.DnsConfig.Search = cfg.Rancher.Network.Dns.Search - if !cfg.Debug { + if !cfg.Rancher.Debug { launchConfig.LogFile = config.SYSTEM_DOCKER_LOG } @@ -148,17 +148,17 @@ func getLaunchConfig(cfg *config.Config, dockerCfg *config.DockerConfig) (*docke } func RunInit() error { - var cfg config.Config + var cfg config.CloudConfig os.Setenv("PATH", "/sbin:/usr/sbin:/usr/bin") // Magic setting to tell Docker to do switch_root and not pivot_root os.Setenv("DOCKER_RAMDISK", "true") initFuncs := []config.InitFunc{ - func(cfg *config.Config) error { + func(cfg *config.CloudConfig) error { return dockerlaunch.PrepareFs(&mountConfig) }, - func(cfg *config.Config) error { + func(cfg *config.CloudConfig) error { newCfg, err := config.LoadConfig() if err == nil { newCfg, err = config.LoadConfig() @@ -167,7 +167,7 @@ func RunInit() error { *cfg = *newCfg } - if cfg.Debug { + if cfg.Rancher.Debug { cfgString, _ := config.Dump(false, true) if cfgString != "" { log.Debugf("Config: %s", cfgString) @@ -178,7 +178,7 @@ func RunInit() error { }, loadModules, tryMountAndBootstrap, - func(cfg *config.Config) error { + func(cfg *config.CloudConfig) error { return cfg.Reload() }, loadModules, @@ -189,7 +189,7 @@ func RunInit() error { return err } - launchConfig, args := getLaunchConfig(&cfg, &cfg.SystemDocker) + launchConfig, args := getLaunchConfig(&cfg, &cfg.Rancher.SystemDocker) log.Info("Launching System Docker") _, err := dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...) 
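
The init/init.go hunks above move every boot step onto the config.InitFunc signature over a shared *config.CloudConfig, so the whole boot sequence becomes a slice of functions applied in order against one config (driven by config.RunInitFuncs, as the init/sysinit.go hunk below shows). A minimal, self-contained sketch of that chaining pattern; CloudConfig, InitFunc, and runInitFuncs here are simplified stand-ins, not the actual RancherOS config package:

package main

import "log"

// Simplified stand-ins for the types in the rancherio/os config package;
// only the shape of the pattern matters here.
type CloudConfig struct {
	Debug bool
}

type InitFunc func(*CloudConfig) error

// runInitFuncs applies each step in order against the same shared config,
// stopping at the first error. Each step sees the mutations made by the
// steps before it, including a full config reload.
func runInitFuncs(cfg *CloudConfig, funcs []InitFunc) error {
	for i, f := range funcs {
		if err := f(cfg); err != nil {
			log.Printf("init step %d failed: %v", i, err)
			return err
		}
	}
	return nil
}

func main() {
	cfg := &CloudConfig{}
	steps := []InitFunc{
		func(c *CloudConfig) error { c.Debug = true; return nil },                  // mutate shared state
		func(c *CloudConfig) error { log.Printf("debug=%v", c.Debug); return nil }, // later steps see it
	}
	if err := runInitFuncs(cfg, steps); err != nil {
		log.Fatal(err)
	}
}

The payoff of this refactor is that module loading, state mounting, bootstrap, and config reload become interchangeable steps of one pipeline instead of bespoke call sites.
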
diff --git a/init/root.go b/init/root.go
index 241bd2a4..86aad6fb 100644
--- a/init/root.go
+++ b/init/root.go
@@ -44,15 +44,14 @@ func copyMoveRoot(rootfs string) error {
 		filename := path.Join("/", file.Name())
 
 		if filename == rootfs {
+			log.Debugf("Skipping Deleting %s", filename)
 			continue
 		}
 
 		log.Debugf("Deleting %s", filename)
-		//if err := os.Remove(filename); err != nil {
 		if err := os.RemoveAll(filename); err != nil {
 			return err
 		}
-		//}
 	}
 
 	return nil
@@ -90,7 +89,7 @@ func switchRoot(rootfs string) error {
 	}
 
 	log.Debugf("Successfully moved to new root at %s", rootfs)
-	os.Setenv("DOCKER_RAMDISK", "false")
+	os.Unsetenv("DOCKER_RAMDISK")
 
 	return nil
 }
diff --git a/init/sysinit.go b/init/sysinit.go
index 40af7591..52b82438 100644
--- a/init/sysinit.go
+++ b/init/sysinit.go
@@ -7,6 +7,7 @@ import (
 
 	log "github.com/Sirupsen/logrus"
 	dockerClient "github.com/fsouza/go-dockerclient"
+	"github.com/rancherio/os/compose"
 	"github.com/rancherio/os/config"
 	"github.com/rancherio/os/docker"
 )
@@ -88,42 +89,6 @@ func loadImages(cfg *config.CloudConfig) error {
 	return nil
 }
 
-func runContainers(cfg *config.CloudConfig) error {
-	return docker.RunServices("system-init", cfg, cfg.Rancher.Services)
-}
-
-func tailConsole(cfg *config.CloudConfig) error {
-	if !cfg.Rancher.Console.Tail {
-		return nil
-	}
-
-	client, err := docker.NewSystemClient()
-	if err != nil {
-		return err
-	}
-
-	console, ok := cfg.Rancher.Services[config.CONSOLE_CONTAINER]
-	if !ok {
-		log.Error("Console not found")
-		return nil
-	}
-
-	c := docker.NewContainerFromService(config.DOCKER_SYSTEM_HOST, config.CONSOLE_CONTAINER, console)
-	if c.Err != nil {
-		return c.Err
-	}
-
-	log.Infof("Tailing console : %s", c.Name)
-	return client.Logs(dockerClient.LogsOptions{
-		Container:    c.Name,
-		Stdout:       true,
-		Stderr:       true,
-		Follow:       true,
-		OutputStream: os.Stdout,
-		ErrorStream:  os.Stderr,
-	})
-}
-
 func SysInit() error {
 	cfg, err := config.LoadConfig()
 	if err != nil {
@@ -132,7 +97,9 @@ func SysInit() error {
 
 	initFuncs := []config.InitFunc{
 		loadImages,
-		runContainers,
+		func(cfg *config.CloudConfig) error {
+			return compose.RunServices(cfg)
+		},
 		func(cfg *config.CloudConfig) error {
 			syscall.Sync()
 			return nil
@@ -141,7 +108,6 @@ func SysInit() error {
 			log.Infof("RancherOS %s started", config.VERSION)
 			return nil
 		},
-		tailConsole,
 	}
 
 	return config.RunInitFuncs(cfg, initFuncs)
diff --git a/main.go b/main.go
index 3b8546af..15a8bd81 100644
--- a/main.go
+++ b/main.go
@@ -6,6 +6,7 @@ import (
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/reexec"
+	dockerlaunchMain "github.com/rancher/docker-from-scratch/main"
 	"github.com/rancherio/os/cmd/cloudinit"
 	"github.com/rancherio/os/cmd/control"
 	"github.com/rancherio/os/cmd/network"
@@ -39,6 +40,7 @@ func registerCmd(cmd string, mainFunc func()) {
 func main() {
 	registerCmd("/init", osInit.MainInit)
 	registerCmd(config.SYSINIT_BIN, sysinit.Main)
+	registerCmd("/usr/bin/dockerlaunch", dockerlaunchMain.Main)
 	registerCmd("/usr/bin/system-docker", systemdocker.Main)
 	registerCmd("/sbin/poweroff", power.PowerOff)
 	registerCmd("/sbin/reboot", power.Reboot)
diff --git a/os-config.yml b/os-config.yml
index 206aecfa..ea67bd1f 100644
--- a/os-config.yml
+++ b/os-config.yml
@@ -27,8 +27,7 @@ rancher:
       labels:
         io.rancher.os.detach: false
         io.rancher.os.scope: system
-      links:
-      - autoformat
+        io.rancher.os.after: autoformat
       log_driver: json-file
       net: host
      uts: host
@@ -89,10 +88,7 @@ rancher:
         io.rancher.os.detach: false
         io.rancher.os.reloadconfig: true
         io.rancher.os.scope: system
-      links:
-      - preload-user-images
-      - cloud-init-pre
-      - network
+        io.rancher.os.after: cloud-init-pre,network
       net: host
       uts: host
       privileged: true
@@ -107,8 +103,7 @@ rancher:
         io.rancher.os.detach: false
         io.rancher.os.reloadconfig: true
         io.rancher.os.scope: system
-      links:
-      - preload-system-images
+        io.rancher.os.after: preload-system-images
       net: host
       uts: host
       privileged: true
@@ -136,15 +131,13 @@ rancher:
       - /usr/bin/ros:/usr/bin/respawn:ro
       - /usr/bin/ros:/usr/bin/system-docker:ro
       - /usr/bin/ros:/usr/sbin/wait-for-docker:ro
-      - /lib/modules:/lib/modules
+      - /usr/bin/ros:/usr/sbin/dockerlaunch:ro
       - /usr/bin/docker:/usr/bin/docker:ro
     console:
       image: rancher/os-console:v0.4.0-dev
       labels:
-        io.rancher.os.remove: true
         io.rancher.os.scope: system
-      links:
-      - cloud-init
+        io.rancher.os.after: cloud-init
       net: host
       uts: host
       pid: host
@@ -157,8 +150,7 @@ rancher:
       image: rancher/os-docker:v0.4.0-dev
       labels:
         io.rancher.os.scope: system
-      links:
-      - network
+        io.rancher.os.after: network
       net: host
       uts: host
       pid: host
@@ -177,27 +169,14 @@ rancher:
       privileged: true
       read_only: true
       volumes:
-      - /var/lib/rancher/conf:/var/lib/rancher/conf
       - /var/lib/docker:/var/lib/docker
       - /var/lib/system-docker:/var/lib/system-docker
-    dockerwait:
-      image: rancher/os-dockerwait:v0.4.0-dev
-      labels:
-        io.rancher.os.detach: false
-        io.rancher.os.scope: system
-      links:
-      - docker
-      net: host
-      uts: host
-      volumes_from:
-      - all-volumes
     network:
       image: rancher/os-network:v0.4.0-dev
       labels:
         io.rancher.os.detach: false
         io.rancher.os.scope: system
-      links:
-      - cloud-init-pre
+        io.rancher.os.after: cloud-init-pre
       net: host
       uts: host
       privileged: true
@@ -208,9 +187,7 @@ rancher:
       image: rancher/os-ntp:v0.4.0-dev
       labels:
         io.rancher.os.scope: system
-      links:
-      - cloud-init
-      - network
+        io.rancher.os.after: cloud-init, network
       net: host
       uts: host
       privileged: true
@@ -231,9 +208,6 @@ rancher:
       image: rancher/os-preload:v0.4.0-dev
       labels:
         io.rancher.os.detach: false
-        io.rancher.os.scope: system
-      links:
-      - dockerwait
       privileged: true
       volumes:
       - /var/run/docker.sock:/var/run/docker.sock
@@ -263,7 +237,7 @@ rancher:
       read_only: true
       volumes:
       - /dev:/host/dev
-      - /os-config.yml:/os-config.yml
+      - /usr/share/ros/os-config.yml:/usr/share/ros/os-config.yml
       - /var/lib/rancher:/var/lib/rancher
       - /var/lib/rancher/conf:/var/lib/rancher/conf
       - /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt.rancher
diff --git a/scripts/run b/scripts/run
index b233d586..1fd16a62 100755
--- a/scripts/run
+++ b/scripts/run
@@ -99,7 +99,7 @@ else
     done
 fi
 
-KERNEL_ARGS="rancher.password=rancher console=ttyS0 ${QEMU_APPEND}"
+KERNEL_ARGS="rancher.password=rancher rancher.modules=[9p,9pnet_virtio] console=ttyS0 ${QEMU_APPEND}"
 
 if [ "$UNAME" == "Darwin" ] && [ -x $(which xhyve) ]; then
diff --git a/util/backoff.go b/util/backoff.go
new file mode 100644
index 00000000..aa4adce7
--- /dev/null
+++ b/util/backoff.go
@@ -0,0 +1,54 @@
+package util
+
+import "time"
+
+type Backoff struct {
+	StartMillis, MaxIntervalMillis, MaxMillis int
+	c                                         chan bool
+	done                                      chan bool
+}
+
+func (b *Backoff) Start() <-chan bool {
+	b.c = make(chan bool)
+	b.done = make(chan bool)
+	go b.backoff()
+	return b.c
+}
+
+func (b *Backoff) Close() error {
+	b.done <- true
+	return nil
+}
+
+func (b *Backoff) backoff() {
+	if b.StartMillis == 0 && b.MaxIntervalMillis == 0 {
+		b.StartMillis = 100
+		b.MaxIntervalMillis = 2000
+		b.MaxMillis = 300000
+	}
+
+	start := time.Now()
+	currentMillis := b.StartMillis
+
+	for {
+		writeVal := true
+		if time.Now().Sub(start) > (time.Duration(b.MaxMillis) * time.Millisecond) {
+			writeVal = false
+		}
+
+		select {
+		case <-b.done:
+			close(b.done)
+			close(b.c)
+			return
+		case b.c <- writeVal:
+		}
+
+		time.Sleep(time.Duration(currentMillis) * time.Millisecond)
+
+		currentMillis *= 2
+		if currentMillis > b.MaxIntervalMillis {
+			currentMillis = b.MaxIntervalMillis
+		}
+	}
+}
diff --git a/util/util.go b/util/util.go
index c9e5be03..e4482b75 100644
--- a/util/util.go
+++ b/util/util.go
@@ -345,3 +345,16 @@ func KVPairs2Map(kvs []string) map[string]string {
 	}
 	return r
 }
+
+func TrimSplitN(str, sep string, count int) []string {
+	result := []string{}
+	for _, part := range strings.SplitN(strings.TrimSpace(str), sep, count) {
+		result = append(result, strings.TrimSpace(part))
+	}
+
+	return result
+}
+
+func TrimSplit(str, sep string) []string {
+	return TrimSplitN(str, sep, -1)
+}
From d56b6ae2a5f292ce1be1bdc4f3f4429fc2eb1594 Mon Sep 17 00:00:00 2001
From: Darren Shepherd
Date: Tue, 4 Aug 2015 23:07:55 -0700
Subject: [PATCH 4/9] Update godeps

---
 Godeps/Godeps.json | 287 +-
 .../github.com/docker/distribution/.drone.yml | 38 +
 .../github.com/docker/distribution/.gitignore | 37 +
 .../github.com/docker/distribution/.mailmap | 6 +
 .../github.com/docker/distribution/AUTHORS | 61 +
 .../docker/distribution/CONTRIBUTING.md | 139 +
 .../github.com/docker/distribution/Dockerfile | 19 +
 .../docker/distribution/Godeps/Godeps.json | 135 +
 .../docker/distribution/Godeps/Readme | 5 +
 .../github.com/docker/distribution/LICENSE | 202 +
 .../docker/distribution/MAINTAINERS | 4 +
 .../github.com/docker/distribution/Makefile | 74 +
 .../github.com/docker/distribution/README.md | 129 +
 .../github.com/docker/distribution/ROADMAP.md | 273 ++
 .../github.com/docker/distribution/blobs.go | 205 +
 .../github.com/docker/distribution/circle.yml | 127 +
 .../docker/distribution/cmd/dist/list.go | 14 +
 .../docker/distribution/cmd/dist/main.go | 21 +
 .../docker/distribution/cmd/dist/pull.go | 21 +
 .../docker/distribution/cmd/dist/push.go | 21 +
 .../registry-api-descriptor-template/main.go | 127 +
 .../cmd/registry/config-cache.yml | 48 +
 .../distribution/cmd/registry/config-dev.yml | 60 +
 .../cmd/registry/config-example.yml | 11 +
 .../docker/distribution/cmd/registry/main.go | 313 ++
 .../docker/distribution/cmd/registry/rados.go | 5 +
 .../configuration/configuration.go | 486 +++
 .../configuration/configuration_test.go | 370 ++
 .../distribution/configuration/parser.go | 203 +
 .../docker/distribution/context/context.go | 85 +
 .../docker/distribution/context/doc.go | 76 +
 .../docker/distribution/context/http.go | 342 ++
 .../docker/distribution/context/http_test.go | 292 ++
 .../docker/distribution/context/logger.go | 101 +
 .../docker/distribution/context/trace.go | 104 +
 .../docker/distribution/context/trace_test.go | 85 +
 .../docker/distribution/context/util.go | 32 +
 .../distribution/contrib/apache/README.MD | 36 +
 .../distribution/contrib/apache/apache.conf | 127 +
 .../distribution/contrib/ceph/ci-setup.sh | 119 +
 .../distribution/contrib/compose/README.md | 147 +
 .../contrib/compose/docker-compose.yml | 15 +
 .../contrib/compose/nginx/Dockerfile | 6 +
 .../compose/nginx/docker-registry-v2.conf | 6 +
 .../compose/nginx/docker-registry.conf | 7 +
 .../contrib/compose/nginx/nginx.conf | 27 +
 .../contrib/compose/nginx/registry.conf | 41 +
 .../contrib/docker-integration/Dockerfile | 46 +
 .../contrib/docker-integration/README.md | 138 +
 .../docker-integration/docker-compose.yml | 27 +
 .../docker-integration/install_certs.sh | 38 +
 .../docker-integration/nginx/Dockerfile | 10 +
 .../nginx/docker-registry-v2.conf | 6 +
 .../nginx/docker-registry.conf | 7 +
 .../docker-integration/nginx/nginx.conf | 27 +
 .../nginx/registry-basic.conf | 13 +
 .../nginx/registry-noauth.conf | 8 +
 .../docker-integration/nginx/registry.conf | 277 ++
 .../docker-integration/nginx/test.passwd | 1 +
 .../contrib/docker-integration/run.sh | 31 +
 .../docker-integration/run_multiversion.sh | 77 +
 .../contrib/docker-integration/test_runner.sh | 50 +
 .../contrib/docker-integration/tls.bats | 102 +
 .../docker/distribution/digest/digest.go | 168 +
 .../docker/distribution/digest/digest_test.go | 111 +
 .../docker/distribution/digest/digester.go | 95 +
 .../digest/digester_resumable_test.go | 21 +
 .../docker/distribution/digest/doc.go | 52 +
 .../docker/distribution/digest/set.go | 195 +
 .../docker/distribution/digest/set_test.go | 272 ++
 .../docker/distribution/digest/tarsum.go | 70 +
 .../docker/distribution/digest/tarsum_test.go | 79 +
 .../docker/distribution/digest/verifiers.go | 122 +
 .../distribution/digest/verifiers_test.go | 162 +
 .../src/github.com/docker/distribution/doc.go | 7 +
 .../docker/distribution/docs/Dockerfile | 26 +
 .../docker/distribution/docs/Makefile | 55 +
 .../docker/distribution/docs/architecture.md | 54 +
 .../distribution/docs/authentication.md | 185 +
 .../docker/distribution/docs/building.md | 157 +
 .../docker/distribution/docs/configuration.md | 1630 ++++++++
 .../docker/distribution/docs/deploying.md | 177 +
 .../docker/distribution/docs/glossary.md | 70 +
 .../docker/distribution/docs/help.md | 24 +
 .../docs/images/notifications.gliffy | 1 +
 .../docs/images/notifications.png | Bin 0 -> 37836 bytes
 .../docs/images/notifications.svg | 1 +
 .../distribution/docs/images/registry.gliffy | 1 +
 .../distribution/docs/images/registry.png | Bin 0 -> 24298 bytes
 .../distribution/docs/images/registry.svg | 1 +
 .../docker/distribution/docs/index.md | 63 +
 .../docker/distribution/docs/introduction.md | 59 +
 .../docker/distribution/docs/migration.md | 30 +
 .../docker/distribution/docs/mirror.md | 62 +
 .../docker/distribution/docs/mkdocs.yml | 18 +
 .../docker/distribution/docs/notifications.md | 323 ++
 .../distribution/docs/osx-setup-guide.md | 62 +
 .../docs/osx/com.docker.registry.plist | 42 +
 .../docker/distribution/docs/osx/config.yml | 16 +
 .../docker/distribution/docs/spec/api.md | 3450 +++++++++++++++++
 .../docker/distribution/docs/spec/api.md.tmpl | 1120 ++++++
 .../distribution/docs/spec/auth/token.md | 425 ++
 .../distribution/docs/spec/implementations.md | 26 +
 .../docker/distribution/docs/spec/json.md | 88 +
 .../distribution/docs/spec/manifest-v2-1.md | 153 +
 .../docs/storage-drivers/azure.md | 24 +
 .../docs/storage-drivers/filesystem.md | 16 +
 .../docs/storage-drivers/inmemory.md | 18 +
 .../distribution/docs/storage-drivers/oss.md | 31 +
 .../docs/storage-drivers/rados.md | 40 +
 .../distribution/docs/storage-drivers/s3.md | 34 +
 .../docs/storage-drivers/swift.md | 139 +
 .../distribution/docs/storagedrivers.md | 61 +
 .../github.com/docker/distribution/errors.go | 82 +
 .../docker/distribution/health/api/api.go | 37 +
 .../distribution/health/api/api_test.go | 86 +
 .../distribution/health/checks/checks.go | 35 +
 .../distribution/health/checks/checks_test.go | 25 +
 .../docker/distribution/health/doc.go | 130 +
 .../docker/distribution/health/health.go | 217 ++
 .../docker/distribution/health/health_test.go | 47 +
 .../docker/distribution/manifest/manifest.go | 124 +
 .../distribution/manifest/manifest_test.go | 110 +
 .../docker/distribution/manifest/sign.go | 66 +
 .../docker/distribution/manifest/verify.go | 32 +
 .../distribution/notifications/bridge.go | 155 +
 .../distribution/notifications/bridge_test.go | 166 +
 .../distribution/notifications/endpoint.go | 86 +
 .../distribution/notifications/event.go | 152 +
 .../distribution/notifications/event_test.go | 157 +
 .../docker/distribution/notifications/http.go | 147 +
 .../distribution/notifications/http_test.go | 157 +
 .../distribution/notifications/listener.go | 205 +
 .../notifications/listener_test.go | 185 +
 .../distribution/notifications/metrics.go | 152 +
 .../distribution/notifications/sinks.go | 337 ++
 .../distribution/notifications/sinks_test.go | 223 ++
 .../distribution/project/dev-image/Dockerfile | 20 +
 .../distribution/project/hooks/README.md | 6 +
 .../project/hooks/configure-hooks.sh | 18 +
 .../distribution/project/hooks/pre-commit | 29 +
 .../docker/distribution/registry.go | 120 +
 .../registry/api/errcode/errors.go | 259 ++
 .../registry/api/errcode/errors_test.go | 179 +
 .../registry/api/errcode/handler.go | 44 +
 .../registry/api/errcode/register.go | 86 +
 .../registry/api/v2/descriptors.go | 1550 ++++++++
 .../distribution/registry/api/v2/doc.go | 9 +
 .../distribution/registry/api/v2/errors.go | 154 +
 .../distribution/registry/api/v2/names.go | 83 +
 .../registry/api/v2/names_test.go | 231 ++
 .../distribution/registry/api/v2/routes.go | 49 +
 .../registry/api/v2/routes_test.go | 355 ++
 .../distribution/registry/api/v2/urls.go | 234 ++
 .../distribution/registry/api/v2/urls_test.go | 239 ++
 .../docker/distribution/registry/auth/auth.go | 142 +
 .../registry/auth/htpasswd/access.go | 102 +
 .../registry/auth/htpasswd/access_test.go | 122 +
 .../registry/auth/htpasswd/htpasswd.go | 80 +
 .../registry/auth/htpasswd/htpasswd_test.go | 85 +
 .../registry/auth/silly/access.go | 97 +
 .../registry/auth/silly/access_test.go | 71 +
 .../registry/auth/token/accesscontroller.go | 268 ++
 .../registry/auth/token/stringset.go | 35 +
 .../distribution/registry/auth/token/token.go | 343 ++
 .../registry/auth/token/token_test.go | 386 ++
 .../distribution/registry/auth/token/util.go | 58 +
 .../registry/client/auth/api_version.go | 58 +
 .../registry/client/auth/authchallenge.go | 219 ++
 .../client/auth/authchallenge_test.go | 38 +
 .../registry/client/auth/session.go | 256 ++
 .../registry/client/auth/session_test.go | 311 ++
 .../registry/client/blob_writer.go | 176 +
 .../registry/client/blob_writer_test.go | 211 +
 .../distribution/registry/client/errors.go | 69 +
 .../registry/client/repository.go | 553 +++
 .../registry/client/repository_test.go | 859 ++++
 .../registry/client/transport/http_reader.go | 173 +
 .../registry/client/transport/transport.go | 147 +
 .../docker/distribution/registry/doc.go | 3 +
 .../registry/handlers/api_test.go | 1380 +++++++
 .../distribution/registry/handlers/app.go | 800 ++++
 .../registry/handlers/app_test.go | 277 ++
 .../registry/handlers/basicauth.go | 11 +
 .../registry/handlers/basicauth_prego14.go | 41 +
 .../distribution/registry/handlers/blob.go | 93 +
 .../registry/handlers/blobupload.go | 327 ++
 .../distribution/registry/handlers/catalog.go | 95 +
 .../distribution/registry/handlers/context.go | 151 +
 .../distribution/registry/handlers/helpers.go | 62 +
 .../distribution/registry/handlers/hmac.go | 72 +
 .../registry/handlers/hmac_test.go | 117 +
 .../distribution/registry/handlers/hooks.go | 53 +
 .../distribution/registry/handlers/images.go | 251 ++
 .../distribution/registry/handlers/mail.go | 45 +
 .../distribution/registry/handlers/tags.go | 64 +
 .../registry/listener/listener.go | 74 +
 .../middleware/registry/middleware.go | 40 +
 .../middleware/repository/middleware.go | 40 +
 .../distribution/registry/proxy/proxyauth.go | 54 +
 .../registry/proxy/proxyblobstore.go | 214 +
 .../registry/proxy/proxyblobstore_test.go | 231 ++
 .../registry/proxy/proxymanifeststore.go | 155 +
 .../registry/proxy/proxymanifeststore_test.go | 235 ++
 .../registry/proxy/proxymetrics.go | 74 +
 .../registry/proxy/proxyregistry.go | 139 +
 .../registry/proxy/scheduler/scheduler.go | 250 ++
 .../proxy/scheduler/scheduler_test.go | 165 +
 .../registry/storage/blob_test.go | 407 ++
 .../registry/storage/blobcachemetrics.go | 60 +
 .../registry/storage/blobserver.go | 79 +
 .../registry/storage/blobstore.go | 198 +
 .../registry/storage/blobwriter.go | 379 ++
 .../storage/blobwriter_nonresumable.go | 17 +
 .../registry/storage/blobwriter_resumable.go | 175 +
 .../registry/storage/cache/cache.go | 35 +
 .../cache/cachedblobdescriptorstore.go | 101 +
 .../registry/storage/cache/memory/memory.go | 170 +
 .../storage/cache/memory/memory_test.go | 13 +
 .../registry/storage/cache/redis/redis.go | 268 ++
 .../storage/cache/redis/redis_test.go | 51 +
 .../registry/storage/cache/suite.go | 178 +
 .../distribution/registry/storage/catalog.go | 65 +
 .../registry/storage/catalog_test.go | 122 +
 .../distribution/registry/storage/doc.go | 3 +
 .../registry/storage/driver/azure/azure.go | 366 ++
 .../storage/driver/azure/azure_test.go | 63 +
 .../storage/driver/azure/blockblob.go | 24 +
 .../storage/driver/azure/blockblob_test.go | 155 +
 .../registry/storage/driver/azure/blockid.go | 60 +
 .../storage/driver/azure/blockid_test.go | 74 +
 .../storage/driver/azure/randomwriter.go | 208 +
 .../storage/driver/azure/randomwriter_test.go | 339 ++
 .../storage/driver/azure/zerofillwriter.go | 49 +
 .../driver/azure/zerofillwriter_test.go | 126 +
 .../registry/storage/driver/base/base.go | 169 +
 .../storage/driver/factory/factory.go | 55 +
 .../registry/storage/driver/fileinfo.go | 79 +
 .../storage/driver/filesystem/driver.go | 291 ++
 .../storage/driver/filesystem/driver_test.go | 26 +
 .../storage/driver/inmemory/driver.go | 262 ++
 .../storage/driver/inmemory/driver_test.go | 19 +
 .../registry/storage/driver/inmemory/mfs.go | 338 ++
 .../middleware/cloudfront/middleware.go | 119 +
 .../driver/middleware/storagemiddleware.go | 39 +
 .../registry/storage/driver/oss/doc.go | 3 +
 .../registry/storage/driver/oss/oss.go | 813 ++++
 .../registry/storage/driver/oss/oss_test.go | 144 +
 .../registry/storage/driver/rados/doc.go | 3 +
 .../registry/storage/driver/rados/rados.go | 630 +++
 .../storage/driver/rados/rados_test.go | 40 +
 .../registry/storage/driver/s3/s3.go | 826 ++++
 .../registry/storage/driver/s3/s3_test.go | 138 +
 .../registry/storage/driver/storagedriver.go | 125 +
 .../registry/storage/driver/swift/swift.go | 657 ++++
 .../storage/driver/swift/swift_test.go | 135 +
 .../storage/driver/testsuites/testsuites.go | 1163 ++++++
 .../registry/storage/filereader.go | 177 +
 .../registry/storage/filereader_test.go | 199 +
 .../registry/storage/filewriter.go | 180 +
 .../registry/storage/filewriter_test.go | 262 ++
 .../registry/storage/linkedblobstore.go | 301 ++
 .../registry/storage/manifeststore.go | 144 +
 .../registry/storage/manifeststore_test.go | 364 ++
 .../distribution/registry/storage/paths.go | 509 +++
 .../registry/storage/paths_test.go | 146 +
 .../registry/storage/purgeuploads.go | 138 +
 .../registry/storage/purgeuploads_test.go | 168 +
 .../distribution/registry/storage/registry.go | 185 +
 .../registry/storage/revisionstore.go | 111 +
 .../registry/storage/signaturestore.go | 141 +
 .../distribution/registry/storage/tagstore.go | 143 +
 .../distribution/registry/storage/util.go | 21 +
 .../distribution/registry/storage/vacuum.go | 67 +
 .../distribution/registry/storage/walk.go | 51 +
 .../registry/storage/walk_test.go | 121 +
 .../docker/distribution/testutil/handler.go | 148 +
 .../docker/distribution/testutil/tarfile.go | 95 +
 .../docker/distribution/uuid/uuid.go | 126 +
 .../docker/distribution/uuid/uuid_test.go | 48 +
 .../docker/distribution/version/print.go | 26 +
 .../docker/distribution/version/version.go | 11 +
 .../docker/distribution/version/version.sh | 22 +
 .../github.com/docker/docker/api/README.md | 5 +
 .../docker/docker/api/api_unit_test.go | 19 +
 .../docker/docker/api/client/attach.go | 84 +
 .../docker/docker/api/client/build.go | 637 +++
 .../docker/docker/api/client/cli.go | 171 +
 .../docker/docker/api/client/client.go | 5 +
 .../docker/docker/api/client/commit.go | 84 +
 .../github.com/docker/docker/api/client/cp.go | 305 ++
 .../docker/docker/api/client/create.go | 185 +
 .../docker/docker/api/client/diff.go | 56 +
 .../docker/docker/api/client/events.go | 63 +
 .../docker/docker/api/client/exec.go | 134 +
 .../docker/docker/api/client/export.go | 46 +
 .../docker/docker/api/client/hijack.go | 257 ++
 .../docker/docker/api/client/history.go | 77 +
 .../docker/docker/api/client/images.go | 130 +
 .../docker/docker/api/client/import.go | 77 +
 .../docker/docker/api/client/info.go | 108 +
 .../docker/docker/api/client/inspect.go | 157 +
 .../docker/docker/api/client/kill.go | 33 +
 .../docker/docker/api/client/load.go | 42 +
 .../docker/docker/api/client/login.go | 147 +
 .../docker/docker/api/client/logout.go | 38 +
 .../docker/docker/api/client/logs.go | 69 +
 .../docker/docker/api/client/network.go | 16 +
 .../docker/docker/api/client/pause.go | 32 +
 .../docker/docker/api/client/port.go | 72 +
 .../github.com/docker/docker/api/client/ps.go | 116 +
 .../docker/docker/api/client/ps/custom.go | 217 ++
 .../docker/api/client/ps/custom_test.go | 88 +
 .../docker/docker/api/client/ps/formatter.go | 73 +
 .../docker/docker/api/client/pull.go | 53 +
 .../docker/docker/api/client/push.go | 53 +
 .../docker/docker/api/client/rename.go | 27 +
 .../docker/docker/api/client/restart.go | 39 +
 .../github.com/docker/docker/api/client/rm.go | 55 +
 .../docker/docker/api/client/rmi.go | 61 +
 .../docker/docker/api/client/run.go | 257 ++
 .../docker/docker/api/client/save.go | 57 +
 .../docker/docker/api/client/search.go | 87 +
 .../docker/docker/api/client/service.go | 17 +
 .../docker/docker/api/client/start.go | 170 +
 .../docker/docker/api/client/stats.go | 202 +
 .../docker/api/client/stats_unit_test.go | 29 +
 .../docker/docker/api/client/stop.go | 41 +
 .../docker/docker/api/client/tag.go | 42 +
 .../docker/docker/api/client/top.go | 49 +
 .../docker/docker/api/client/trust.go | 435 +++
 .../docker/docker/api/client/unpause.go | 32 +
 .../docker/docker/api/client/utils.go | 379 ++
 .../docker/docker/api/client/version.go | 96 +
 .../docker/docker/api/client/wait.go | 35 +
 .../github.com/docker/docker/api/common.go | 133 +
 .../docker/docker/api/server/form.go | 56 +
 .../docker/docker/api/server/form_test.go | 70 +
 .../docker/docker/api/server/profiler.go | 38 +
 .../docker/docker/api/server/server.go | 1727 +++++++++
 .../docker/api/server/server_experimental.go | 17 +
 .../docker/api/server/server_linux_test.go | 68 +
 .../docker/docker/api/server/server_stub.go | 6 +
 .../docker/docker/api/server/server_unix.go | 136 +
 .../docker/api/server/server_windows.go | 69 +
 .../docker/docker/api/types/stats.go | 91 +
 .../docker/docker/api/types/types.go | 280 ++
 .../autogen/dockerversion/dockerversion.go | 11 +
 .../docker/docker/cliconfig/config.go | 227 ++
 .../docker/docker/cliconfig/config_test.go | 188 +
 .../docker/docker/daemon/network/settings.go | 31 +
 .../docker/docker/graph/tags/tags.go | 29 +
 .../docker/graph/tags/tags_unit_test.go | 23 +
 .../github.com/docker/docker/image/image.go | 59 +
 .../github.com/docker/docker/image/spec/v1.md | 573 +++
 .../github.com/docker/docker/opts/envfile.go | 30 +-
 .../docker/docker/opts/envfile_test.go | 133 +
 .../docker/docker/opts/hosts_unix.go | 7 +
 .../docker/docker/opts/hosts_windows.go | 7 +
 .../src/github.com/docker/docker/opts/ip.go | 1 +
 .../github.com/docker/docker/opts/ip_test.go | 54 +
 .../src/github.com/docker/docker/opts/opts.go | 141 +-
 .../docker/docker/opts/opts_test.go | 311 +-
 .../github.com/docker/docker/opts/ulimit.go | 13 +-
 .../docker/docker/opts/ulimit_test.go | 42 +
 .../docker/docker/pkg/archive/README.md | 1 +
 .../docker/docker/pkg/archive/archive.go | 902 +++++
 .../docker/docker/pkg/archive/archive_test.go | 1204 ++++++
 .../docker/docker/pkg/archive/archive_unix.go | 89 +
 .../docker/pkg/archive/archive_unix_test.go | 60 +
 .../docker/pkg/archive/archive_windows.go | 50 +
 .../pkg/archive/archive_windows_test.go | 65 +
 .../docker/docker/pkg/archive/changes.go | 383 ++
 .../docker/pkg/archive/changes_linux.go | 285 ++
 .../docker/pkg/archive/changes_other.go | 97 +
 .../docker/pkg/archive/changes_posix_test.go | 127 +
 .../docker/docker/pkg/archive/changes_test.go | 495 +++
 .../docker/docker/pkg/archive/changes_unix.go | 27 +
 .../docker/pkg/archive/changes_windows.go | 20 +
 .../docker/docker/pkg/archive/copy.go | 308 ++
 .../docker/docker/pkg/archive/copy_test.go | 637 +++
 .../docker/docker/pkg/archive/diff.go | 210 +
 .../docker/docker/pkg/archive/diff_test.go | 190 +
 .../docker/pkg/archive/example_changes.go | 97 +
 .../docker/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes
 .../docker/docker/pkg/archive/time_linux.go | 16 +
 .../docker/pkg/archive/time_unsupported.go | 16 +
 .../docker/docker/pkg/archive/utils_test.go | 166 +
 .../docker/docker/pkg/archive/wrap.go | 59 +
 .../docker/docker/pkg/archive/wrap_test.go | 98 +
 .../docker/docker/pkg/fileutils/fileutils.go | 196 +
 .../docker/pkg/fileutils/fileutils_test.go | 402 ++
 .../docker/docker/pkg/homedir/homedir.go | 2 +-
 .../docker/docker/pkg/httputils/httputils.go | 58 +
 .../docker/docker/pkg/httputils/mimetype.go | 29 +
 .../pkg/httputils/resumablerequestreader.go | 95 +
 .../httputils/resumablerequestreader_test.go | 83 +
 .../docker/docker/pkg/ioutils/fmt.go | 14 +
 .../docker/docker/pkg/ioutils/fmt_test.go | 17 +
 .../docker/docker/pkg/ioutils/multireader.go | 226 ++
 .../docker/pkg/ioutils/multireader_test.go | 149 +
 .../docker/docker/pkg/ioutils/readers.go | 254 ++
 .../docker/docker/pkg/ioutils/readers_test.go | 216 ++
 .../docker/docker/pkg/ioutils/scheduler.go | 6 +
 .../docker/pkg/ioutils/scheduler_gccgo.go | 13 +
 .../docker/docker/pkg/ioutils/writeflusher.go | 47 +
 .../docker/docker/pkg/ioutils/writers.go | 60 +
 .../docker/docker/pkg/ioutils/writers_test.go | 65 +
 .../docker/pkg/jsonmessage/jsonmessage.go | 172 +
 .../pkg/jsonmessage/jsonmessage_test.go | 210 +
 .../docker/docker/pkg/mflag/flag.go | 502 +--
 .../docker/docker/pkg/mount/mount.go | 2 +-
 .../docker/docker/pkg/mount/mountinfo.go | 8 +-
 .../docker/pkg/mount/mountinfo_freebsd.go | 6 +-
 .../docker/pkg/mount/mountinfo_linux.go | 12 +-
 .../docker/pkg/mount/mountinfo_linux_test.go | 4 +-
 .../docker/pkg/mount/mountinfo_unsupported.go | 2 +-
 .../pkg/mount/sharedsubtree_linux_test.go | 2 +-
 .../docker/docker/{ => pkg}/nat/nat.go | 63 +-
 .../docker/docker/{ => pkg}/nat/nat_test.go | 192 +-
 .../docker/docker/{ => pkg}/nat/sort.go | 3 +
 .../docker/docker/{ => pkg}/nat/sort_test.go | 6 +-
 .../docker/pkg/parsers/filters/parse.go | 30 +-
 .../docker/pkg/parsers/filters/parse_test.go | 172 +-
 .../docker/pkg/parsers/kernel/kernel.go | 27 +-
 .../docker/pkg/parsers/kernel/kernel_test.go | 77 +-
 .../pkg/parsers/kernel/kernel_windows.go | 18 +-
 .../docker/pkg/parsers/kernel/uname_linux.go | 3 +
 .../pkg/parsers/kernel/uname_unsupported.go | 3 +
 .../operatingsystem_freebsd.go | 18 +
 .../operatingsystem/operatingsystem_linux.go | 4 +
 .../operatingsystem_windows.go | 4 +-
 .../docker/docker/pkg/parsers/parsers.go | 38 +-
 .../docker/docker/pkg/parsers/parsers_test.go | 105 +-
 .../docker/docker/pkg/pools/pools.go | 119 +
 .../docker/docker/pkg/pools/pools_test.go | 162 +
 .../docker/docker/pkg/promise/promise.go | 11 +
 .../docker/docker/pkg/random/random.go | 61 +
 .../docker/docker/pkg/random/random_test.go | 22 +
 .../docker/pkg/reexec/command_freebsd.go | 23 +
 .../docker/docker/pkg/reexec/command_linux.go | 10 +
 .../docker/pkg/reexec/command_unsupported.go | 3 +-
 .../docker/pkg/reexec/command_windows.go | 9 +
 .../docker/docker/pkg/reexec/reexec.go | 3 +-
 .../docker/docker/pkg/stdcopy/stdcopy.go | 168 +
 .../docker/docker/pkg/stdcopy/stdcopy_test.go | 85 +
 .../docker/docker/pkg/stringid/README.md | 1 +
 .../docker/docker/pkg/stringid/stringid.go | 67 +
 .../docker/pkg/stringid/stringid_test.go | 56 +
 .../docker/docker/pkg/symlink/LICENSE.APACHE | 191 +
 .../docker/docker/pkg/symlink/LICENSE.BSD | 27 +
 .../docker/docker/pkg/symlink/README.md | 5 +
 .../docker/docker/pkg/symlink/fs.go | 131 +
 .../docker/docker/pkg/symlink/fs_test.go | 402 +
 .../docker/docker/pkg/system/errors.go | 9 +
 .../docker/pkg/system/events_windows.go | 83 +
 .../docker/docker/pkg/system/filesys.go | 11 +
 .../docker/pkg/system/filesys_windows.go | 64 +
 .../docker/docker/pkg/system/lstat.go | 19 +
 .../docker/docker/pkg/system/lstat_test.go | 28 +
 .../docker/docker/pkg/system/lstat_windows.go | 29 +
 .../docker/docker/pkg/system/meminfo.go | 17 +
 .../docker/docker/pkg/system/meminfo_linux.go | 71 +
 .../docker/pkg/system/meminfo_linux_test.go | 38 +
 .../docker/pkg/system/meminfo_unsupported.go | 7 +
 .../docker/pkg/system/meminfo_windows.go | 44 +
 .../docker/docker/pkg/system/mknod.go | 20 +
 .../docker/docker/pkg/system/mknod_windows.go | 11 +
 .../docker/docker/pkg/system/stat.go | 46 +
 .../docker/docker/pkg/system/stat_freebsd.go | 27 +
 .../docker/docker/pkg/system/stat_linux.go | 33 +
 .../docker/docker/pkg/system/stat_test.go | 37 +
 .../docker/pkg/system/stat_unsupported.go | 17 +
 .../docker/docker/pkg/system/stat_windows.go | 36 +
 .../docker/docker/pkg/system/umask.go | 11 +
 .../docker/docker/pkg/system/umask_windows.go | 8 +
 .../docker/docker/pkg/system/utimes_darwin.go | 11 +
 .../docker/pkg/system/utimes_freebsd.go | 24 +
 .../docker/docker/pkg/system/utimes_linux.go | 28 +
 .../docker/docker/pkg/system/utimes_test.go | 66 +
 .../docker/pkg/system/utimes_unsupported.go | 13 +
 .../docker/docker/pkg/system/xattrs_linux.go | 59 +
 .../docker/pkg/system/xattrs_unsupported.go | 11 +
 .../docker/pkg/tarsum/builder_context.go | 21 +
 .../docker/pkg/tarsum/builder_context_test.go | 63 +
 .../docker/docker/pkg/tarsum/fileinfosums.go | 126 +
 .../docker/pkg/tarsum/fileinfosums_test.go | 62 +
 .../docker/docker/pkg/tarsum/tarsum.go | 294 ++
 .../docker/docker/pkg/tarsum/tarsum_spec.md | 225 ++
 .../docker/docker/pkg/tarsum/tarsum_test.go | 648 ++++
 .../json | 1 +
 .../layer.tar | Bin 0 -> 9216 bytes
 .../json | 1 +
 .../layer.tar | Bin 0 -> 1536 bytes
 .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes
 .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes
 .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes
 .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes
 .../docker/pkg/tarsum/testdata/xattr/json | 1 +
 .../pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes
 .../docker/docker/pkg/tarsum/versioning.go | 150 +
 .../docker/pkg/tarsum/versioning_test.go | 98 +
 .../docker/docker/pkg/tarsum/writercloser.go | 22 +
 .../docker/docker/pkg/term/tc_linux_cgo.go | 51 +
 .../docker/docker/pkg/term/tc_other.go | 19 +
 .../github.com/docker/docker/pkg/term/term.go | 132 +
 .../docker/docker/pkg/term/term_windows.go | 205 +
 .../docker/docker/pkg/term/termios_darwin.go | 69 +
 .../docker/docker/pkg/term/termios_freebsd.go | 69 +
 .../docker/docker/pkg/term/termios_linux.go | 47 +
 .../docker/pkg/term/windows/ansi_reader.go | 256 ++
 .../docker/pkg/term/windows/ansi_writer.go | 76 +
 .../docker/docker/pkg/term/windows/console.go | 61 +
 .../docker/docker/pkg/term/windows/windows.go | 5 +
 .../docker/pkg/term/windows/windows_test.go | 3 +
 .../docker/docker/pkg/timeutils/json.go | 27 +
 .../docker/docker/pkg/timeutils/json_test.go | 47 +
 .../docker/docker/pkg/timeutils/utils.go | 36 +
 .../docker/docker/pkg/timeutils/utils_test.go | 44 +
 .../docker/docker/pkg/tlsconfig/config.go | 132 +
 .../docker/docker/pkg/ulimit/ulimit.go | 71 +-
 .../docker/docker/pkg/units/duration.go | 4 +-
 .../docker/docker/pkg/units/size.go | 12 +-
 .../docker/docker/pkg/urlutil/urlutil.go | 50 +
 .../docker/docker/pkg/urlutil/urlutil_test.go | 55 +
 .../docker/docker/pkg/useragent/README.md | 1 +
 .../docker/docker/pkg/useragent/useragent.go | 55 +
 .../docker/pkg/useragent/useragent_test.go | 31 +
 .../docker/docker/pkg/version/version.go | 63 +
 .../docker/docker/pkg/version/version_test.go | 27 +
 .../github.com/docker/docker/registry/auth.go | 254 ++
 .../docker/docker/registry/auth_test.go | 173 +
 .../docker/docker/registry/authchallenge.go | 150 +
 .../docker/docker/registry/config.go | 372 ++
 .../docker/docker/registry/config_test.go | 49 +
 .../docker/docker/registry/endpoint.go | 277 ++
 .../docker/docker/registry/endpoint_test.go | 93 +
 .../docker/docker/registry/reference.go | 68 +
 .../docker/docker/registry/registry.go | 237 ++
 .../docker/registry/registry_mock_test.go | 476 +++
 .../docker/docker/registry/registry_test.go | 918 +++++
 .../docker/docker/registry/service.go | 206 +
 .../docker/docker/registry/session.go | 760 ++++
 .../docker/docker/registry/token.go | 81 +
 .../docker/docker/registry/types.go | 140 +
 .../docker/docker/runconfig/compare.go | 6 -
 .../docker/docker/runconfig/compare_test.go | 124 +
 .../docker/docker/runconfig/config.go | 120 +-
 .../docker/docker/runconfig/config_test.go | 401 +-
 .../docker/docker/runconfig/exec.go | 24 +-
 .../docker/docker/runconfig/exec_test.go | 129 +
 .../fixtures/container_hostconfig_1_14.json | 18 +
 .../fixtures/container_hostconfig_1_19.json | 30 +
 .../docker/runconfig/fixtures/valid.env | 1 +
 .../docker/runconfig/fixtures/valid.label | 1 +
 .../docker/docker/runconfig/hostconfig.go | 229 +-
 .../docker/runconfig/hostconfig_test.go | 303 ++
 .../docker/runconfig/hostconfig_unix.go | 60 +
 .../docker/runconfig/hostconfig_windows.go | 20 +
 .../docker/docker/runconfig/merge.go
| 52 +- .../docker/docker/runconfig/merge_test.go | 83 + .../docker/docker/runconfig/parse.go | 226 +- .../docker/runconfig/parse_experimental.go | 4 +- .../docker/docker/runconfig/parse_test.go | 493 +++ .../docker/docker/runconfig/parse_unix.go | 58 + .../docker/docker/runconfig/parse_windows.go | 20 + .../docker/docker/utils/experimental.go | 9 + .../src/github.com/docker/docker/utils/git.go | 100 + .../docker/docker/utils/git_test.go | 186 + .../github.com/docker/docker/utils/stubs.go | 9 + .../github.com/docker/docker/utils/utils.go | 288 ++ .../docker/docker/utils/utils_test.go | 100 + .../docker/docker/volume/drivers/adapter.go | 60 + .../docker/docker/volume/drivers/api.go | 25 + .../docker/docker/volume/drivers/extpoint.go | 67 + .../docker/docker/volume/drivers/proxy.go | 149 + .../docker/volume/drivers/proxy_test.go | 96 + .../docker/docker/volume/local/local.go | 202 + .../github.com/docker/docker/volume/volume.go | 61 + .../libcompose/cli/logger/color_logger.go | 70 + .../docker/libcompose/cli/logger/colors.go | 34 + .../docker/libcompose/docker/builder.go | 157 + .../docker/libcompose/docker/client.go | 94 + .../libcompose/docker/client_factory.go | 32 + .../docker/libcompose/docker/container.go | 487 +++ .../docker/libcompose/docker/context.go | 33 + .../libcompose}/docker/convert.go | 37 +- .../libcompose}/docker/convert_test.go | 2 +- .../docker/libcompose/docker/functions.go | 49 + .../docker/libcompose/docker/labels.go | 47 + .../docker/libcompose/docker/name.go | 68 + .../docker/libcompose/docker/project.go | 50 + .../docker/libcompose/docker/service.go | 300 ++ .../libcompose/docker/service_factory.go | 11 + .../docker/libcompose/logger/null.go | 14 + .../docker/libcompose/logger/types.go | 24 + .../libcompose/lookup/file.go} | 4 +- .../docker/libcompose/lookup/simple_env.go | 20 + .../docker/libcompose/project/context.go | 132 + .../libcompose}/project/empty.go | 26 +- .../docker/libcompose/project/hash.go | 105 + .../docker/libcompose/project/info.go | 42 + .../libcompose}/project/listener.go | 9 + .../libcompose}/project/merge.go | 32 +- .../docker/libcompose/project/project.go | 371 ++ .../libcompose/project/service-wrapper.go | 116 + .../libcompose}/project/types.go | 89 +- .../libcompose}/project/types_yaml.go | 0 .../libcompose}/project/types_yaml_test.go | 0 .../docker/libcompose/project/utils.go | 63 + .../docker/libcompose/utils/util.go | 95 + .../docker/libnetwork/resolvconf/README.md | 1 + .../libnetwork/resolvconf/dns/resolvconf.go | 17 + .../libnetwork/resolvconf/resolvconf.go | 187 + .../libnetwork/resolvconf/resolvconf_test.go | 240 ++ .../docker/libtrust/CONTRIBUTING.md | 13 + .../src/github.com/docker/libtrust/LICENSE | 191 + .../github.com/docker/libtrust/MAINTAINERS | 3 + .../src/github.com/docker/libtrust/README.md | 18 + .../docker/libtrust/certificates.go | 175 + .../docker/libtrust/certificates_test.go | 111 + .../src/github.com/docker/libtrust/doc.go | 9 + .../src/github.com/docker/libtrust/ec_key.go | 428 ++ .../github.com/docker/libtrust/ec_key_test.go | 157 + .../src/github.com/docker/libtrust/filter.go | 50 + .../github.com/docker/libtrust/filter_test.go | 81 + .../src/github.com/docker/libtrust/hash.go | 56 + .../github.com/docker/libtrust/jsonsign.go | 657 ++++ .../docker/libtrust/jsonsign_test.go | 380 ++ .../src/github.com/docker/libtrust/key.go | 253 ++ .../github.com/docker/libtrust/key_files.go | 255 ++ .../docker/libtrust/key_files_test.go | 220 ++ .../github.com/docker/libtrust/key_manager.go | 175 + 
.../github.com/docker/libtrust/key_test.go | 80 + .../src/github.com/docker/libtrust/rsa_key.go | 427 ++ .../docker/libtrust/rsa_key_test.go | 157 + .../docker/libtrust/testutil/certificates.go | 94 + .../docker/libtrust/tlsdemo/README.md | 50 + .../docker/libtrust/tlsdemo/client.go | 89 + .../docker/libtrust/tlsdemo/gencert.go | 62 + .../docker/libtrust/tlsdemo/genkeys.go | 61 + .../docker/libtrust/tlsdemo/server.go | 80 + .../docker/libtrust/trustgraph/graph.go | 50 + .../libtrust/trustgraph/memory_graph.go | 133 + .../libtrust/trustgraph/memory_graph_test.go | 174 + .../docker/libtrust/trustgraph/statement.go | 227 ++ .../libtrust/trustgraph/statement_test.go | 417 ++ .../src/github.com/docker/libtrust/util.go | 363 ++ .../github.com/docker/libtrust/util_test.go | 45 + .../src/github.com/docker/machine/log/log.go | 123 + .../github.com/docker/machine/log/log_test.go | 19 + .../github.com/docker/machine/log/terminal.go | 129 + .../github.com/docker/machine/utils/b2d.go | 190 +- .../docker/machine/utils/b2d_test.go | 19 +- .../github.com/docker/machine/utils/certs.go | 66 +- .../docker/machine/utils/certs_test.go | 10 +- .../github.com/docker/machine/utils/utils.go | 109 +- .../docker/machine/utils/utils_test.go | 42 +- .../go-dockerclient/testing/data/Dockerfile | 15 + .../github.com/gorilla/context/.travis.yml | 9 + .../src/github.com/gorilla/context/LICENSE | 27 + .../src/github.com/gorilla/context/README.md | 7 + .../src/github.com/gorilla/context/context.go | 143 + .../gorilla/context/context_test.go | 161 + .../src/github.com/gorilla/context/doc.go | 82 + .../src/github.com/gorilla/mux/.travis.yml | 7 + .../src/github.com/gorilla/mux/LICENSE | 27 + .../src/github.com/gorilla/mux/README.md | 7 + .../src/github.com/gorilla/mux/bench_test.go | 21 + .../src/github.com/gorilla/mux/doc.go | 206 + .../src/github.com/gorilla/mux/mux.go | 465 +++ .../src/github.com/gorilla/mux/mux_test.go | 1195 ++++++ .../src/github.com/gorilla/mux/old_test.go | 714 ++++ .../src/github.com/gorilla/mux/regexp.go | 295 ++ .../src/github.com/gorilla/mux/route.go | 603 +++ .../runc}/libcontainer/user/MAINTAINERS | 0 .../runc}/libcontainer/user/lookup.go | 0 .../runc}/libcontainer/user/lookup_unix.go | 0 .../libcontainer/user/lookup_unsupported.go | 0 .../runc}/libcontainer/user/user.go | 28 +- .../runc}/libcontainer/user/user_test.go | 13 +- .../rancher/docker-from-scratch/.dockerignore | 2 + .../rancher/docker-from-scratch/.drone.yml | 3 + .../rancher/docker-from-scratch/.gitignore | 7 + .../docker-from-scratch/.wrap-docker-args | 1 + .../rancher/docker-from-scratch/Dockerfile | 8 + .../docker-from-scratch/Dockerfile.wrap | 3 + .../docker-from-scratch/Godeps/Godeps.json | 45 + .../rancher/docker-from-scratch/Godeps/Readme | 5 + .../rancher/docker-from-scratch/LICENSE | 178 + .../rancher/docker-from-scratch/README.md | 101 + .../base-image/.dockerignore | 5 + .../docker-from-scratch/base-image/.no-chown | 0 .../docker-from-scratch/base-image/Dockerfile | 5 + .../docker-from-scratch/base-image/build.sh | 24 + .../base-image/config/buildroot-config-static | 2230 +++++++++++ .../config/busybox-ps-modprobe-only.config | 1002 +++++ .../base-image/scripts/bootstrap | 8 + .../base-image/scripts/build | 8 + .../base-image/scripts/build-busybox-static | 49 + .../base-image/scripts/clean | 5 + .../base-image/scripts/download | 41 + .../base-image/scripts/package | 34 + .../rancher/docker-from-scratch/build.sh | 4 + .../rancher/docker-from-scratch/main/main.go | 37 + .../rancher/docker-from-scratch/scratch.go | 466 +++ 
.../scripts/Dockerfile.build | 8 + .../rancher/docker-from-scratch/scripts/build | 19 + .../rancher/docker-from-scratch/scripts/ci | 14 + .../docker-from-scratch/scripts/common | 2 + .../docker-from-scratch/scripts/download | 53 + .../docker-from-scratch/scripts/package | 11 + .../rancher/docker-from-scratch/scripts/test | 15 + .../docker-from-scratch/scripts/version | 1 + .../docker-from-scratch/util/util_linux.go | 41 + .../src/github.com/rancher/netconf/LICENSE | 178 + .../src/github.com/rancher/netconf/README.md | 3 + .../src/github.com/rancher/netconf/ipv4ll.go | 75 + .../src/github.com/rancher/netconf/netconf.go | 161 + .../src/github.com/rancher/netconf/types.go | 21 + .../rancherio/go-rancher/client/client.go | 7 - .../go-rancher/client/client_test.go | 236 -- .../rancherio/go-rancher/client/common.go | 418 -- .../go-rancher/client/generated_account.go | 138 - .../client/generated_active_setting.go | 71 - .../generated_add_load_balancer_input.go | 65 - ...generated_add_remove_cluster_host_input.go | 63 - ...ted_add_remove_load_balancer_host_input.go | 63 - ...add_remove_load_balancer_listener_input.go | 63 - ...d_add_remove_load_balancer_target_input.go | 65 - .../go-rancher/client/generated_agent.go | 147 - .../go-rancher/client/generated_api_key.go | 91 - .../client/generated_certificate.go | 87 - .../go-rancher/client/generated_client.go | 182 - .../go-rancher/client/generated_cluster.go | 103 - .../client/generated_config_item.go | 65 - .../client/generated_config_item_status.go | 75 - .../go-rancher/client/generated_container.go | 165 - .../client/generated_container_event.go | 105 - .../client/generated_container_exec.go | 69 - .../client/generated_container_logs.go | 65 - .../go-rancher/client/generated_credential.go | 140 - .../client/generated_databasechangelog.go | 81 - .../client/generated_databasechangeloglock.go | 67 - .../client/generated_digitalocean_config.go | 69 - .../generated_extension_implementation.go | 67 - .../client/generated_extension_point.go | 71 - .../client/generated_external_handler.go | 142 - ...al_handler_external_handler_process_map.go | 138 - .../generated_external_handler_process.go | 134 - .../client/generated_githubconfig.go | 71 - .../client/generated_global_load_balancer.go | 105 - ...rated_global_load_balancer_health_check.go | 63 - .../generated_global_load_balancer_policy.go | 63 - .../go-rancher/client/generated_host.go | 146 - .../client/generated_host_access.go | 65 - .../go-rancher/client/generated_image.go | 136 - .../go-rancher/client/generated_instance.go | 157 - .../client/generated_instance_console.go | 67 - .../generated_instance_console_input.go | 61 - .../client/generated_instance_link.go | 144 - .../client/generated_instance_stop.go | 67 - .../go-rancher/client/generated_ip_address.go | 147 - .../generated_ip_address_associate_input.go | 63 - .../client/generated_load_balancer.go | 107 - ...d_balancer_app_cookie_stickiness_policy.go | 75 - .../client/generated_load_balancer_config.go | 114 - ..._load_balancer_cookie_stickiness_policy.go | 75 - .../generated_load_balancer_health_check.go | 73 - .../generated_load_balancer_listener.go | 111 - .../client/generated_load_balancer_policy.go | 63 - .../client/generated_load_balancer_target.go | 105 - .../go-rancher/client/generated_machine.go | 129 - .../go-rancher/client/generated_mount.go | 144 - .../go-rancher/client/generated_network.go | 136 - .../client/generated_physical_host.go | 115 - .../go-rancher/client/generated_port.go | 148 - .../client/generated_process_definition.go 
| 73 - .../client/generated_process_execution.go | 67 - .../client/generated_process_instance.go | 85 - .../go-rancher/client/generated_project.go | 89 - .../go-rancher/client/generated_publish.go | 83 - .../go-rancher/client/generated_register.go | 93 - .../client/generated_registration_token.go | 136 - .../go-rancher/client/generated_registry.go | 89 - .../client/generated_registry_credential.go | 95 - .../generated_remove_load_balancer_input.go | 63 - .../client/generated_resource_definition.go | 63 - .../client/generated_restart_policy.go | 65 - ...generated_set_load_balancer_hosts_input.go | 63 - ...rated_set_load_balancer_listeners_input.go | 63 - ...nerated_set_load_balancer_targets_input.go | 65 - .../go-rancher/client/generated_setting.go | 65 - .../client/generated_stats_access.go | 65 - .../client/generated_storage_pool.go | 136 - .../go-rancher/client/generated_subscribe.go | 65 - .../go-rancher/client/generated_task.go | 70 - .../client/generated_task_instance.go | 73 - .../client/generated_type_documentation.go | 63 - .../client/generated_virtualbox_config.go | 67 - .../go-rancher/client/generated_volume.go | 158 - .../rancherio/go-rancher/client/types.go | 89 - .../librcompose/project/project.go | 279 -- .../librcompose/project/project_test.go | 27 - .../librcompose/project/service-wrapper.go | 199 - .../project/test_files/docker-compose.yml | 25 - .../rancher-compose/librcompose/util/util.go | 21 - .../samalba/dockerclient/.gitignore | 22 + .../github.com/samalba/dockerclient/LICENSE | 202 + .../github.com/samalba/dockerclient/README.md | 98 + .../github.com/samalba/dockerclient/auth.go | 38 + .../samalba/dockerclient/auth_test.go | 15 + .../samalba/dockerclient/dockerclient.go | 714 ++++ .../samalba/dockerclient/dockerclient_test.go | 240 ++ .../samalba/dockerclient/engine_mock_test.go | 245 ++ .../samalba/dockerclient/example_responses.go | 13 + .../samalba/dockerclient/examples/events.go | 39 + .../dockerclient/examples/stats/stats.go | 43 + .../samalba/dockerclient/interface.go | 46 + .../samalba/dockerclient/mockclient/mock.go | 162 + .../dockerclient/mockclient/mock_test.go | 32 + .../github.com/samalba/dockerclient/types.go | 444 +++ .../github.com/samalba/dockerclient/utils.go | 33 + .../stretchr/testify/assert/assertions.go | 895 +++++ .../testify/assert/assertions_test.go | 813 ++++ .../github.com/stretchr/testify/assert/doc.go | 154 + .../stretchr/testify/assert/errors.go | 10 + .../testify/assert/forward_assertions.go | 265 ++ .../testify/assert/forward_assertions_test.go | 511 +++ .../testify/assert/http_assertions.go | 157 + .../testify/assert/http_assertions_test.go | 86 + .../stretchr/testify/require/doc.go | 77 + .../testify/require/forward_requirements.go | 211 + .../require/forward_requirements_test.go | 260 ++ .../stretchr/testify/require/requirements.go | 271 ++ .../testify/require/requirements_test.go | 266 ++ .../x/crypto/ssh/terminal/terminal.go | 892 +++++ .../x/crypto/ssh/terminal/terminal_test.go | 269 ++ .../golang.org/x/crypto/ssh/terminal/util.go | 128 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 11 + .../x/crypto/ssh/terminal/util_windows.go | 174 + 842 files changed, 105257 insertions(+), 9836 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/.drone.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/.gitignore create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/.mailmap create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/LICENSE create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/Makefile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/blobs.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/circle.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/context.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/http.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/logger.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/trace.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/set.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/doc.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/authentication.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/building.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/help.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.gliffy create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.png create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.svg create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/index.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/errors.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/health.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/manifest/sign.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/manifest/verify.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go 
create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/version/print.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/version/version.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/version/version.sh create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/attach.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/build.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/client.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/create.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/events.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/export.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/history.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/images.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/import.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/info.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/load.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/login.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/network.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/port.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/ps/formatter.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/push.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/run.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/save.go create mode 100644 
Godeps/_workspace/src/github.com/docker/docker/api/client/search.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/service.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/start.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/stats_unit_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/top.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/version.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/common.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/form.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/form_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_experimental.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_stub.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/types/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/cliconfig/config.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/cliconfig/config_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/daemon/network/settings.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags_unit_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/image/image.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/opts/envfile_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/opts/ip_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/opts/ulimit_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go create mode 100644 
Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go create mode 100644 
Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go rename Godeps/_workspace/src/github.com/docker/docker/{ => pkg}/nat/nat.go (64%) rename Godeps/_workspace/src/github.com/docker/docker/{ => pkg}/nat/nat_test.go (61%) rename Godeps/_workspace/src/github.com/docker/docker/{ => pkg}/nat/sort.go (92%) rename Godeps/_workspace/src/github.com/docker/docker/{ => pkg}/nat/sort_test.go (92%) create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/random/random_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 
Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 
Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/tlsconfig/config.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/auth.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/config.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/config_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/endpoint_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/reference.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/registry.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/service.go create mode 
100644 Godeps/_workspace/src/github.com/docker/docker/registry/session.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/token.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/registry/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/compare_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/exec_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_14.json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_19.json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.env create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.label create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_unix.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/merge_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_unix.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_windows.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/experimental.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/git.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/git_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/stubs.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/drivers/adapter.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/drivers/api.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/drivers/extpoint.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/local/local.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/volume/volume.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/color_logger.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/colors.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/builder.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/client.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/client_factory.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/container.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/context.go rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/docker/convert.go (73%) rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/docker/convert_test.go (92%) create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/functions.go create mode 100644 
Godeps/_workspace/src/github.com/docker/libcompose/docker/labels.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/name.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/project.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/service.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/docker/service_factory.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/logger/null.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/logger/types.go rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose/project/config_lookup.go => docker/libcompose/lookup/file.go} (81%) create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/lookup/simple_env.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/project/context.go rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/project/empty.go (53%) create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/project/hash.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/project/info.go rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/project/listener.go (88%) rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/project/merge.go (88%) create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/project/project.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/project/service-wrapper.go rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/project/types.go (70%) rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/project/types_yaml.go (100%) rename Godeps/_workspace/src/github.com/{rancherio/rancher-compose/librcompose => docker/libcompose}/project/types_yaml_test.go (100%) create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/project/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcompose/utils/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go create mode 100644 Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf.go create mode 100644 Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/LICENSE create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/certificates.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/filter.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go create mode 100644 
Godeps/_workspace/src/github.com/docker/libtrust/hash.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_files.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/util_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/machine/log/log.go create mode 100644 Godeps/_workspace/src/github.com/docker/machine/log/log_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/machine/log/terminal.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/README.md create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/README.md create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/old_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/regexp.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/route.go rename Godeps/_workspace/src/github.com/{docker => 
opencontainers/runc}/libcontainer/user/MAINTAINERS (100%) rename Godeps/_workspace/src/github.com/{docker => opencontainers/runc}/libcontainer/user/lookup.go (100%) rename Godeps/_workspace/src/github.com/{docker => opencontainers/runc}/libcontainer/user/lookup_unix.go (100%) rename Godeps/_workspace/src/github.com/{docker => opencontainers/runc}/libcontainer/user/lookup_unsupported.go (100%) rename Godeps/_workspace/src/github.com/{docker => opencontainers/runc}/libcontainer/user/user.go (93%) rename Godeps/_workspace/src/github.com/{docker => opencontainers/runc}/libcontainer/user/user_test.go (96%) create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.dockerignore create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.drone.yml create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.gitignore create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.wrap-docker-args create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile.wrap create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Godeps.json create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Readme create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/LICENSE create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/README.md create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.dockerignore create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.no-chown create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/build.sh create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/buildroot-config-static create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/busybox-ps-modprobe-only.config create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/bootstrap create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build-busybox-static create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/clean create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/download create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/package create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/build.sh create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/main/main.go create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scratch.go create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/Dockerfile.build create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/build create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/ci create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/common create mode 100644 
Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/download create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/package create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/test create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/version create mode 100644 Godeps/_workspace/src/github.com/rancher/docker-from-scratch/util/util_linux.go create mode 100644 Godeps/_workspace/src/github.com/rancher/netconf/LICENSE create mode 100644 Godeps/_workspace/src/github.com/rancher/netconf/README.md create mode 100644 Godeps/_workspace/src/github.com/rancher/netconf/ipv4ll.go create mode 100644 Godeps/_workspace/src/github.com/rancher/netconf/netconf.go create mode 100644 Godeps/_workspace/src/github.com/rancher/netconf/types.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client_test.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/common.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_account.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_active_setting.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_load_balancer_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_cluster_host_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_host_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_listener_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_target_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_agent.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_api_key.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_certificate.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_client.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_cluster.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item_status.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_event.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_exec.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_logs.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_credential.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangelog.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangeloglock.go delete mode 100644 
Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_digitalocean_config.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_implementation.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_point.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_external_handler_process_map.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_process.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_githubconfig.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_health_check.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_policy.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host_access.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_image.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_link.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_stop.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address_associate_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_app_cookie_stickiness_policy.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_config.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_cookie_stickiness_policy.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_health_check.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_listener.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_policy.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_target.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_machine.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_mount.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_network.go delete mode 100644 
Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_physical_host.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_port.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_definition.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_execution.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_instance.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_project.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_publish.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_register.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registration_token.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry_credential.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_remove_load_balancer_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_resource_definition.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_restart_policy.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_hosts_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_listeners_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_targets_input.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_setting.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_stats_access.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_storage_pool.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_subscribe.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task_instance.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_type_documentation.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_virtualbox_config.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_volume.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/go-rancher/client/types.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project_test.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/service-wrapper.go delete mode 100644 Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/test_files/docker-compose.yml delete mode 100644 Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/util/util.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/.gitignore 
create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/LICENSE create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/README.md create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/auth.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/auth_test.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient_test.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/engine_mock_test.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/example_responses.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/examples/events.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/examples/stats/stats.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/interface.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock_test.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/types.go create mode 100644 Godeps/_workspace/src/github.com/samalba/dockerclient/utils.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/require/requirements_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_windows.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 5d2950ec..07eb65fb 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,9 @@ { "ImportPath": "github.com/rancherio/os", - "GoVersion": "go1.4.2", + "GoVersion": "go1.4.1", + "Packages": [ + "./..." 
+ ], "Deps": [ { "ImportPath": "github.com/Sirupsen/logrus", @@ -51,69 +54,247 @@ "Rev": "6b16a5714269b2f70720a45406b1babd947a17ef" }, { - "ImportPath": "github.com/docker/docker/nat", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "ImportPath": "github.com/docker/distribution", + "Comment": "v2.1.0-rc.0", + "Rev": "a0c63372fad430b7ab08d2763cb7d9e2c512c384" + }, + { + "ImportPath": "github.com/docker/docker/api", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/autogen/dockerversion", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/cliconfig", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/daemon/network", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/graph/tags", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/image", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/opts", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/archive", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/fileutils", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/homedir", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/httputils", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/ioutils", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/jsonmessage", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/mflag", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/nat", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/parsers", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/pools", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": 
"github.com/docker/docker/pkg/promise", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/random", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/reexec", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/stdcopy", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/stringid", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/symlink", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/system", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/tarsum", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/term", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/timeutils", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/tlsconfig", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/ulimit", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/pkg/units", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/urlutil", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/useragent", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/pkg/version", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/registry", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" }, { "ImportPath": "github.com/docker/docker/runconfig", - "Comment": "v1.7.0", - "Rev": "0baf60984522744eed290348f33f396c046b2f3a" + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/utils", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/docker/volume", + "Comment": "v1.4.1-5200-gf39987a", + "Rev": "f39987afe8d611407887b3094c03d6ba6a766a67" + }, + { + "ImportPath": "github.com/docker/libcompose/cli/logger", + "Rev": "7dc5362063183024d12cafaa2ba8ea8830b7ab62" + }, + { + "ImportPath": "github.com/docker/libcompose/docker", + "Rev": "7dc5362063183024d12cafaa2ba8ea8830b7ab62" + }, + 
{ + "ImportPath": "github.com/docker/libcompose/logger", + "Rev": "7dc5362063183024d12cafaa2ba8ea8830b7ab62" + }, + { + "ImportPath": "github.com/docker/libcompose/lookup", + "Rev": "7dc5362063183024d12cafaa2ba8ea8830b7ab62" + }, + { + "ImportPath": "github.com/docker/libcompose/project", + "Rev": "7dc5362063183024d12cafaa2ba8ea8830b7ab62" + }, + { + "ImportPath": "github.com/docker/libcompose/utils", + "Rev": "7dc5362063183024d12cafaa2ba8ea8830b7ab62" }, { "ImportPath": "github.com/docker/libcontainer/netlink", - "Comment": "v2.2.1", - "Rev": "5dc7ba0f24332273461e45bc49edcb4d5aa6c44c" + "Comment": "v2.2.1-23-g83a102c", + "Rev": "83a102cc68a09d890cce3b6c2e5c14c49e6373a0" }, { - "ImportPath": "github.com/docker/libcontainer/user", - "Comment": "v2.2.1", - "Rev": "5dc7ba0f24332273461e45bc49edcb4d5aa6c44c" + "ImportPath": "github.com/docker/libnetwork/resolvconf", + "Comment": "v0.2-301-g0cc39f8", + "Rev": "0cc39f87276366ef6f22961ef2018d957d662724" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a" + }, + { + "ImportPath": "github.com/docker/machine/log", + "Comment": "v0.3.0-rc1-172-g4a8e93a", + "Rev": "4a8e93ac9bc2ced1c3bc4a43c03fdaa1c2749205" }, { "ImportPath": "github.com/docker/machine/utils", - "Comment": "v0.1.0-rc4", - "Rev": "d674e87813ffc10048f55d884396be1af327705e" + "Comment": "v0.3.0-rc1-172-g4a8e93a", + "Rev": "4a8e93ac9bc2ced1c3bc4a43c03fdaa1c2749205" }, { "ImportPath": "github.com/flynn/go-shlex", @@ -123,6 +304,14 @@ "ImportPath": "github.com/fsouza/go-dockerclient", "Rev": "4c9e84441078c14677bad5722161ad09146b0c4e" }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "f15e0c49460fd49eebe2bcc8486b05d1bef68d3a" + }, { "ImportPath": "github.com/guelfey/go.dbus", "Rev": "f6a3a2366cc39b8479cadc499d3c735fb10fbdda" @@ -136,33 +325,45 @@ "Rev": "d57d9d2d5be197e12d9dee142d855470d83ce62f" }, { - "ImportPath": "github.com/rancherio/go-rancher/client", - "Comment": "v0.1.0", - "Rev": "166cde1ff3be90b9199a3703177668b647cbdcf4" + "ImportPath": "github.com/opencontainers/runc/libcontainer/user", + "Comment": "v0.0.2-32-gb40c790", + "Rev": "b40c7901845dcec5950ecb37cb9de178fc2c0604" }, { - "ImportPath": "github.com/rancherio/rancher-compose/librcompose/docker", - "Comment": "v0.1.3", - "Rev": "43423a0bbedafb961bfe15d2a1adf77379f04c07" + "ImportPath": "github.com/rancher/docker-from-scratch", + "Comment": "v1.7.1-2-12-gf2f4b5e", + "Rev": "f2f4b5e9b789016fbc4578814a1f4bd0f8c3ba80" }, { - "ImportPath": "github.com/rancherio/rancher-compose/librcompose/project", - "Comment": "v0.1.3", - "Rev": "43423a0bbedafb961bfe15d2a1adf77379f04c07" - }, - { - "ImportPath": "github.com/rancherio/rancher-compose/librcompose/util", - "Comment": "v0.1.3", - "Rev": "43423a0bbedafb961bfe15d2a1adf77379f04c07" + "ImportPath": "github.com/rancher/netconf", + "Rev": "157105e12d6963f6a1c5765540261f6878f0de89" }, { "ImportPath": "github.com/ryanuber/go-glob", "Rev": "0067a9abd927e50aed5190662702f81231413ae0" }, + { + "ImportPath": "github.com/samalba/dockerclient", + "Rev": "16320a397fb98ba77c00283de77930e37b7a2153" + }, + { + "ImportPath": "github.com/stretchr/testify/assert", + "Comment": "v1.0-17-g089c718", + "Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff" + }, + { + "ImportPath": "github.com/stretchr/testify/require", + "Comment": "v1.0-17-g089c718", + "Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff" + }, { "ImportPath": 
"github.com/vishvananda/netlink", "Rev": "ae3e7dba57271b4e976c4f91637861ee477135e2" }, + { + "ImportPath": "golang.org/x/crypto/ssh/terminal", + "Rev": "2f3083f6163ef51179ad42ed523a18c9a1141467" + }, { "ImportPath": "golang.org/x/net/context", "Rev": "84ba27dd5b2d8135e9da1395277f2c9333a2ffda" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml b/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml new file mode 100644 index 00000000..d943e19f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml @@ -0,0 +1,38 @@ +image: dmp42/go:stable + +script: + # To be spoofed back into the test image + - go get github.com/modocache/gover + + - go get -t ./... + + # Go fmt + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + # Go lint + - test -z "$(golint ./... | tee /dev/stderr)" + # Go vet + - go vet ./... + # Go test + - go test -v -race -cover ./... + # Helper to concatenate reports + - gover + # Send to coverall + - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} + + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" + # http://labix.org/gocheck + +notify: + email: + recipients: + - distribution@docker.com + + slack: + team: docker + channel: "#dt" + username: mom + token: {{SLACK_TOKEN}} + on_success: true + on_failure: true diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.gitignore b/Godeps/_workspace/src/github.com/docker/distribution/.gitignore new file mode 100644 index 00000000..1c3ae0a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/.gitignore @@ -0,0 +1,37 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap new file mode 100644 index 00000000..bcfe6635 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap @@ -0,0 +1,6 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS new file mode 100644 index 00000000..2b9e4c3e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS @@ -0,0 +1,61 @@ +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Elman +Amy Lindburg +Andrey Kostov +Andy Goldstein +Anton Tiurin +Antonio Mercado +Arnaud Porterie +BadZen +Ben Firshman +bin liu +Brian Bland +burnettk +Daisuke Fujita +Dave Trombley +David Lawrence +David Xia +Derek McGowan +Diogo Mónica +Donald Huang +Doug Davis +Frederick F. 
Kautz IV +Henri Gomez +Hu Keping +Ian Babrou +Jeff Nickoloff +Jessie Frazelle +Jordan Liggitt +Josh Hawn +Julien Fernandez +Kelsey Hightower +Kenneth Lim +Mary Anthony +Matt Robenolt +Michael Prokop +moxiegirl +Nathan Sullivan +Nghia Tran +Oilbeater +Olivier Gambier +Philip Misiowiec +Richard Scothern +Richard Scothern +Sebastiaan van Stijn +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +Spencer Rinehart +Stephen J Day +Thomas Sjögren +Tianon Gravi +Tibor Vass +Vincent Batts +Vincent Demeester +Vincent Giersch +W. Trevor King +xiekeyang +Yann ROBERT +yuzou diff --git a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 00000000..b91a1d0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,139 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry -version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work).
It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+ In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile new file mode 100644 index 00000000..5555606f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile @@ -0,0 +1,19 @@ +FROM golang:1.4 + +RUN apt-get update && \ + apt-get install -y librados-dev apache2-utils && \ + rm -rf /var/lib/apt/lists/* + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH +ENV DOCKER_BUILDTAGS include_rados + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +COPY cmd/registry/config-dev.yml $DISTRIBUTION_DIR/cmd/registry/config.yml +RUN make PREFIX=/go clean binaries + +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["cmd/registry/config.yml"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json new file mode 100644 index 00000000..355596df --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json @@ -0,0 +1,135 @@ +{ + "ImportPath": "github.com/docker/distribution", + "GoVersion": "go1.4.2", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/AdRoll/goamz/aws", + "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" + }, + { + "ImportPath": "github.com/AdRoll/goamz/cloudfront", + "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" + }, + { + "ImportPath": "github.com/AdRoll/goamz/s3", + "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/bugsnag/panicwrap", + "Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27" + }, + { + "ImportPath": "github.com/codegangsta/cli", + "Comment": "1.2.0-66-g6086d79", + "Rev": "6086d7927ec35315964d9fea46df6c04e6d697c1" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" + }, + { + "ImportPath": "github.com/docker/docker/pkg/tarsum", + "Comment": "v1.4.1-3932-gb63ec6e", + "Rev": "b63ec6e4b1f6f5c77a6a74a52fcea9564538c575" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" + }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" + }, + { + "ImportPath": "github.com/gorilla/handlers", + "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + }, + { + "ImportPath": "github.com/noahdesu/go-ceph/rados", + "Comment": "v.0.3.0-29-gb15639c", + "Rev": "b15639c44c05368348355229070361395d9152ee" + }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "22c8fa9fb5ba145b4d4e2cebb027e84b1a7b1296" + }, + { + "ImportPath": "github.com/yvasiyarov/go-metrics", + "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" + }, + { + "ImportPath": "github.com/yvasiyarov/gorelic", + "Comment": "v0.0.6-8-ga9bba5b", + "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" + }, + { + "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", + "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" + }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" + } + ] +} diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/LICENSE b/Godeps/_workspace/src/github.com/docker/distribution/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS
new file mode 100644
index 00000000..0abd7d4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS
@@ -0,0 +1,4 @@
+Solomon Hykes (@shykes)
+Olivier Gambier (@dmp42)
+Sam Alba (@samalba)
+Stephen Day (@stevvooe)
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/Makefile
new file mode 100644
index 00000000..0a8d77ae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/Makefile
@@ -0,0 +1,74 @@
+# Set an output prefix, which is the local directory if not specified
+PREFIX?=$(shell pwd)
+
+
+# Used to populate version variable in main package.
+VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
+
+# Allow turning off function inlining and variable registerization
+ifeq (${DISABLE_OPTIMIZATION},true)
+	GO_GCFLAGS=-gcflags "-N -l"
+	VERSION:="$(VERSION)-noopt"
+endif
+
+GO_LDFLAGS=-ldflags "-X `go list ./version`.Version $(VERSION)"
+
+.PHONY: clean all fmt vet lint build test binaries
+.DEFAULT: default
+all: AUTHORS clean fmt vet lint build test binaries
+
+AUTHORS: .mailmap .git/HEAD
+	git log --format='%aN <%aE>' | sort -fu > $@
+
+# This only needs to be generated by hand when cutting full releases.
+version/version.go:
+	./version/version.sh > $@
+
+${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go')
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
+
+${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go')
+	@echo "+ $@"
+	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
+
+${PREFIX}/bin/dist: version/version.go $(shell find . -type f -name '*.go')
+	@echo "+ $@"
+	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/dist
+
+docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
+	./bin/registry-api-descriptor-template $< > $@
+
+# Depends on binaries because vet will silently fail if it can't load compiled
+# imports
+vet: binaries
+	@echo "+ $@"
+	@go vet ./...
+
+fmt:
+	@echo "+ $@"
+	@test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \
+		echo "+ please format Go code with 'gofmt -s'"
+
+lint:
+	@echo "+ $@"
+	@test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
+
+build:
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./...
+
+test:
+	@echo "+ $@"
+	@go test -test.short -tags "${DOCKER_BUILDTAGS}" ./...
+
+test-full:
+	@echo "+ $@"
+	@go test ./...
+
+binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template ${PREFIX}/bin/dist
+	@echo "+ $@"
+
+clean:
+	@echo "+ $@"
+	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template"
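A note on the `GO_LDFLAGS` definition above: the `-X` linker flag overwrites a string variable in the `version` package at link time, which is how the `git describe` output ends up in the binaries. A minimal sketch of the pattern this assumes (the default value is illustrative, not the project's actual one):

```go
// Package version is the target of the Makefile's
// -ldflags "-X <pkg>/version.Version <value>" stamping.
package version

// Version is overwritten at link time; the default marks unstamped builds.
var Version = "v0.0.0+unknown"
```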
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/README.md b/Godeps/_workspace/src/github.com/docker/distribution/README.md
new file mode 100644
index 00000000..ce215887
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/README.md
@@ -0,0 +1,129 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry) project
+with a new API design, focused around security and performance.
+
+This repository contains the following components:
+
+| **Component**      | Description |
+|--------------------|-------------|
+| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **dist**           | An _experimental_ tool to provide distribution-oriented functionality without the `docker` daemon. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec). |
+| **documentation**  | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of a V2 API for use in the
+[Docker core project](https://github.com/docker/docker). The API should be
+embeddable and simplify the process of securely pulling and pushing content
+from `docker` daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secured and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home-made solution through good specs, and solid
+  extension mechanisms.
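+
+One aside that may help orient readers (an illustrative sketch, not part of
+the upstream docs): a registry speaking the V2 API answers its base endpoint
+`/v2/` with `200 OK` (or `401` when authentication is enabled) and identifies
+itself with the `Docker-Distribution-API-Version` header. The address assumes
+the `:5000` listener used by the sample configurations in this repository:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// main probes the V2 base endpoint of a locally running registry.
+func main() {
+	resp, err := http.Get("http://localhost:5000/v2/")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	fmt.Println("status: ", resp.Status)
+	fmt.Println("version:", resp.Header.Get("Docker-Distribution-API-Version"))
+}
+```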
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](http://docs.docker.com/installation) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For
+these use cases and others, [deploying your own registry instance](docs/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see
+[docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](docs/building.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+<table>
+  <tr>
+    <td>IRC</td>
+    <td>#docker-distribution on FreeNode</td>
+  </tr>
+  <tr>
+    <td>Issue Tracker</td>
+    <td>github.com/docker/distribution/issues</td>
+  </tr>
+  <tr>
+    <td>Google Groups</td>
+    <td>https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution</td>
+  </tr>
+  <tr>
+    <td>Mailing List</td>
+    <td>docker@dockerproject.org</td>
+  </tr>
+</table>
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE.md).
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md b/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
new file mode 100644
index 00000000..cbf53881
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
@@ -0,0 +1,273 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release
+relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): release relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](https://github.com/docker/docker-registry)
+  implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+  distribution package.
+- Define a strong data model for distributing docker images
+- Provide a flexible distribution tool kit for use in the docker platform
+- Unlock new distribution models
+
+## Distribution Components
+
+Components of the Distribution Project are managed via github
+[milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone.
+If a feature or bugfix is not part of a milestone, it is currently unscheduled
+for implementation.
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance.
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker.
+Such an approach will allow us to unlock new distribution models that haven't
+been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, ask whether it can be
+added as an extension or companion service.
+
+#### Extensibility
+
+The registry should provide extension points to add functionality, keeping the
+core scope narrow while still allowing the system to grow.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Mirroring and Pull-through Caching
+
+Mirroring and pull-through caching are related but slightly different. We've
+adopted the term _mirroring_ to mean a proper mirror of a registry, meaning it
+has all the content the upstream would have. Providing such mirrors in the
+Docker ecosystem is dependent on a solid trust system, which is still in the
+works.
+
+The more commonly useful feature is _pull-through caching_, where data is
+fetched from an upstream when not available in a local registry instance.
+
+Please see the following issues:
+
+- https://github.com/docker/distribution/issues/459
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with the existing
+tools, and either propose it as a standard search API or use it to inform a
+standardization process. Once this has been explored, we will integrate with
+the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much asked for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal; ideally, both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation.
+The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: 1. maintaining a consistent consensus
+  of reference counts across a set of Registries and 2. building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective. (A sketch of this mark-and-sweep idea follows this list.)
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments. This would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple, but the amount of work
+in coordination may offset the extra work it might take to build a
+_Centralized Oracle_. We'll accept proposals for any solution but please
+coordinate with us before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
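+
+To make the "Lock the World GC" bullet concrete, here is a minimal
+mark-and-sweep sketch over the blob DAG described above. The `Manifest` and
+`store` types are hypothetical stand-ins, not types from this codebase, and
+the code assumes writes are already halted:
+
+```go
+package gc
+
+// Manifest is a hypothetical, simplified manifest: a digest of its own
+// plus the digests of the layer blobs it references.
+type Manifest struct {
+	Digest string
+	Layers []string
+}
+
+// store is a hypothetical view of the blob store with writes halted.
+type store interface {
+	Manifests() []Manifest // enumerate every manifest
+	Blobs() []string       // enumerate every blob digest
+	Delete(dgst string)    // remove a blob
+}
+
+// markAndSweep deletes every blob that no manifest references. This is only
+// safe because the store is frozen for the duration of the walk.
+func markAndSweep(s store) {
+	marked := map[string]bool{}
+	// Mark: every manifest, and every blob a manifest references, is live.
+	for _, m := range s.Manifests() {
+		marked[m.Digest] = true
+		for _, l := range m.Layers {
+			marked[l] = true
+		}
+	}
+	// Sweep: anything unmarked is unreferenced and may be deleted.
+	for _, dgst := range s.Blobs() {
+		if !marked[dgst] {
+			s.Delete(dgst)
+		}
+	}
+}
+```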
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take
+care to vendor the dependent version.
+
+For feature additions, please see the Registry section. In the future, we may
+break out a separate Roadmap for distribution-specific features that apply to
+more than just the registry.
+
+***
+
+### Project Planning
+
+Distribution Components map to Docker Platform Releases via the use of labels.
+Project Pages are used to define the set of features that are included in each
+Docker Platform Release.
+
+| Platform Version | Label | Planning |
+|------------------|-------|----------|
+| Docker 1.6 | [Docker/1.6](https://github.com/docker/distribution/labels/docker%2F1.6) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.6-Project-Page) |
+| Docker 1.7 | [Docker/1.7](https://github.com/docker/distribution/labels/docker%2F1.7) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.7-Project-Page) |
+| Docker 1.8 | [Docker/1.8](https://github.com/docker/distribution/labels/docker%2F1.8) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.8-Project-Page) |
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/blobs.go b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go
new file mode 100644
index 00000000..556bf93e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go
@@ -0,0 +1,205 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+var (
+	// ErrBlobExists returned when blob already exists
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown when blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown returned when upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength returned when the blob has an unexpected length on
+	// commit, meaning mismatched with the descriptor or an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+
+	// ErrUnsupported returned when an unsupported operation is attempted
+	ErrUnsupported = errors.New("unsupported operation")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Much of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
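+
+// As an illustrative (non-normative) usage sketch, a caller might probe for
+// a blob and treat ErrBlobUnknown as "not present" rather than a failure:
+//
+//	desc, err := statter.Stat(ctx, dgst)
+//	switch err {
+//	case nil:
+//		// blob is known; desc.Size and desc.Digest are populated
+//	case ErrBlobUnknown:
+//		// blob is not present in this store
+//	default:
+//		// infrastructure error talking to the backing store
+//	}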
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (i.e. sha256) of the annotator.
+	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+	// Clear enables descriptors to be unlinked
+	Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+	// Get returns the entire blob identified by digest along with the descriptor.
+	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+	// Open provides a ReadSeekCloser to the blob identified by the provided
+	// descriptor. If the blob is not known to the service, an error will be
+	// returned.
+	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
+	// service may decide to redirect the client elsewhere or serve the data
+	// directly.
+	//
+	// This handler only issues successful responses, such as 2xx or 3xx,
+	// meaning it serves data or issues a redirect. If the blob is not
+	// available, an error will be returned and the caller may still issue a
+	// response.
+	//
+	// The implementation may serve the same blob from a different digest
+	// domain. The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface { + io.WriteSeeker + io.ReaderFrom + io.Closer + + // ID returns the identifier for this writer. The ID can be used with the + // Blob service to later resume the write. + ID() string + + // StartedAt returns the time this blob write was started. + StartedAt() time.Time + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error + + // Get a reader to the blob being written by this BlobWriter + Reader() (io.ReadCloser, error) +} + +// BlobService combines the operations to access, read and write blobs. This +// can be used to describe remote blob services. +type BlobService interface { + BlobStatter + BlobProvider + BlobIngester +} + +// BlobStore represent the entire suite of blob related operations. Such an +// implementation can access, read, write, delete and serve blobs. +type BlobStore interface { + BlobService + BlobServer + BlobDeleter +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml new file mode 100644 index 00000000..7bd48373 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml @@ -0,0 +1,127 @@ +# Pony-up! 
+machine: + pre: + # Install gvm + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) + # Install ceph to test rados driver & create pool + - sudo -i ~/distribution/contrib/ceph/ci-setup.sh + - ceph osd pool create docker-distribution 1 + + post: + # Install many go versions + # - gvm install go1.3.3 -B --name=old + - gvm install go1.4.2 -B --name=stable + # - gvm install tip --name=bleed + + environment: + # Convenient shortcuts to "common" locations + CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME + BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + # Trick circle brainflat "no absolute path" behavior + BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR + BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR + # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR + DOCKER_BUILDTAGS: "include_rados include_oss" + # Workaround Circle parsing dumb bugs and/or YAML wonkyness + CIRCLE_PAIN: "mode: set" + # Ceph config + RADOS_POOL: "docker-distribution" + + hosts: + # Not used yet + fancy: 127.0.0.1 + +dependencies: + pre: + # Copy the code to the gopath of all go versions + # - > + # gvm use old && + # mkdir -p "$(dirname $BASE_OLD)" && + # cp -R "$CHECKOUT" "$BASE_OLD" + + - > + gvm use stable && + mkdir -p "$(dirname $BASE_STABLE)" && + cp -R "$CHECKOUT" "$BASE_STABLE" + + # - > + # gvm use bleed && + # mkdir -p "$(dirname $BASE_BLEED)" && + # cp -R "$CHECKOUT" "$BASE_BLEED" + + override: + # Install dependencies for every copied clone/go version + # - gvm use old && go get github.com/tools/godep: + # pwd: $BASE_OLD + + - gvm use stable && go get github.com/tools/godep: + pwd: $BASE_STABLE + + # - gvm use bleed && go get github.com/tools/godep: + # pwd: $BASE_BLEED + + post: + # For the stable go version, additionally install linting tools + - > + gvm use stable && + go get github.com/axw/gocov/gocov github.com/golang/lint/golint + # Disabling goveralls for now + # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint + +test: + pre: + # Output the go versions we are going to test + # - gvm use old && go version + - gvm use stable && go version + # - gvm use bleed && go version + + # First thing: build everything. This will catch compile errors, and it's + # also necessary for go vet to work properly (see #807). + - gvm use stable && godep go install ./...: + pwd: $BASE_STABLE + + # FMT + - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + pwd: $BASE_STABLE + + # VET + - gvm use stable && go vet ./...: + pwd: $BASE_STABLE + + # LINT + - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + pwd: $BASE_STABLE + + override: + # Test every version we have (but stable) + # - gvm use old; godep go test -test.v -test.short ./...: + # timeout: 600 + # pwd: $BASE_OLD + + # - gvm use bleed; go test -test.v -test.short ./...: + # timeout: 600 + # pwd: $BASE_BLEED + + # Test stable, and report + # Preset the goverall report file + - echo "$CIRCLE_PAIN" > ~/goverage.report + - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out: + pwd: $BASE_STABLE + - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}: + timeout: 600 + pwd: $BASE_STABLE + + post: + # Aggregate and report to coveralls + - gvm use stable; go list ./... 
| xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report: + pwd: $BASE_STABLE +# - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN: +# pwd: $BASE_STABLE + + ## Notes + # Disabled coveralls reporting: build breaking sending coverage data to coveralls + # Disabled the -race detector due to massive memory usage. + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" + # http://labix.org/gocheck diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go new file mode 100644 index 00000000..e540d4d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go @@ -0,0 +1,14 @@ +package main + +import "github.com/codegangsta/cli" + +var ( + commandList = cli.Command{ + Name: "images", + Usage: "List available images", + Action: imageList, + } +) + +func imageList(c *cli.Context) { +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go new file mode 100644 index 00000000..34a2b514 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "os" + + "github.com/codegangsta/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "dist" + app.Usage = "Package and ship Docker content" + + app.Action = commandList.Action + app.Commands = []cli.Command{ + commandList, + commandPull, + commandPush, + } + app.Run(os.Args) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go new file mode 100644 index 00000000..8f96129c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go @@ -0,0 +1,21 @@ +package main + +import "github.com/codegangsta/cli" + +var ( + commandPull = cli.Command{ + Name: "pull", + Usage: "Pull and verify an image from a registry", + Action: imagePull, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "r,registry", + Value: "hub.docker.io", + Usage: "Registry to use (e.g.: localhost:5000)", + }, + }, + } +) + +func imagePull(c *cli.Context) { +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go new file mode 100644 index 00000000..c39922aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go @@ -0,0 +1,21 @@ +package main + +import "github.com/codegangsta/cli" + +var ( + commandPush = cli.Command{ + Name: "push", + Usage: "Push an image to a registry", + Action: imagePush, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "r,registry", + Value: "hub.docker.io", + Usage: "Registry to use (e.g.: localhost:5000)", + }, + }, + } +) + +func imagePush(*cli.Context) { +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go new file mode 100644 index 00000000..05a1b487 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go @@ -0,0 +1,127 @@ +// registry-api-descriptor-template uses the APIDescriptor defined in the +// 
api/v2 package to execute templates passed to the command line. +// +// For example, to generate a new API specification, one would execute the +// following command from the repo root: +// +// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md +// +// The templates are passed in the api/v2.APIDescriptor object. Please see the +// package documentation for fields available on that object. The template +// syntax is from Go's standard library text/template package. For information +// on Go's template syntax, please see golang.org/pkg/text/template. +package main + +import ( + "log" + "net/http" + "os" + "path/filepath" + "regexp" + "text/template" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" +) + +var spaceRegex = regexp.MustCompile(`\n\s*`) + +func main() { + + if len(os.Args) != 2 { + log.Fatalln("please specify a template to execute.") + } + + path := os.Args[1] + filename := filepath.Base(path) + + funcMap := template.FuncMap{ + "removenewlines": func(s string) string { + return spaceRegex.ReplaceAllString(s, " ") + }, + "statustext": http.StatusText, + "prettygorilla": prettyGorillaMuxPath, + } + + tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) + + data := struct { + RouteDescriptors []v2.RouteDescriptor + ErrorDescriptors []errcode.ErrorDescriptor + }{ + RouteDescriptors: v2.APIDescriptor.RouteDescriptors, + ErrorDescriptors: errcode.GetErrorCodeGroup("registry.api.v2"), + } + + if err := tmpl.Execute(os.Stdout, data); err != nil { + log.Fatalln(err) + } +} + +// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux +// route string, making it suitable for documentation. +func prettyGorillaMuxPath(s string) string { + // Stateful parser that removes regular expressions from gorilla + // routes. It correctly handles balanced bracket pairs. 
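+	// For example, a route registered as
+	//   "/v2/{name:[a-z]+}/manifests/{reference}"
+	// renders as "/v2/<name>/manifests/<reference>".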
+ + var output string + var label string + var level int + +start: + if s[0] == '{' { + s = s[1:] + level++ + goto capture + } + + output += string(s[0]) + s = s[1:] + + goto end +capture: + switch s[0] { + case '{': + level++ + case '}': + level-- + + if level == 0 { + s = s[1:] + goto label + } + case ':': + s = s[1:] + goto skip + default: + label += string(s[0]) + } + s = s[1:] + goto capture +skip: + switch s[0] { + case '{': + level++ + case '}': + level-- + } + s = s[1:] + + if level == 0 { + goto label + } + + goto skip +label: + if label != "" { + output += "<" + label + ">" + label = "" + } +end: + if s != "" { + goto start + } + + return output + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml new file mode 100644 index 00000000..0b524043 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml @@ -0,0 +1,48 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development +storage: + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry-cache + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-8082 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +proxy: + remoteurl: https://registry-1.docker.io + username: username + password: password diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml new file mode 100644 index 00000000..3f4616d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml @@ -0,0 +1,60 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + debug: + addr: localhost:5001 +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-5003 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml new file mode 100644 index 00000000..cb91e63d --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml @@ -0,0 +1,11 @@ +version: 0.1 +log: + fields: + service: registry +storage: + cache: + layerinfo: inmemory + filesystem: + rootdirectory: /var/lib/registry +http: + addr: :5000 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go new file mode 100644 index 00000000..9196d316 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go @@ -0,0 +1,313 @@ +package main + +import ( + "crypto/tls" + "crypto/x509" + _ "expvar" + "flag" + "fmt" + "io/ioutil" + "net/http" + _ "net/http/pprof" + "os" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/formatters/logstash" + "github.com/bugsnag/bugsnag-go" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + _ "github.com/docker/distribution/health" + _ "github.com/docker/distribution/registry/auth/htpasswd" + _ "github.com/docker/distribution/registry/auth/silly" + _ "github.com/docker/distribution/registry/auth/token" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + _ "github.com/docker/distribution/registry/proxy" + _ "github.com/docker/distribution/registry/storage/driver/azure" + _ "github.com/docker/distribution/registry/storage/driver/filesystem" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/oss" + _ "github.com/docker/distribution/registry/storage/driver/s3" + _ "github.com/docker/distribution/registry/storage/driver/swift" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + gorhandlers "github.com/gorilla/handlers" + "github.com/yvasiyarov/gorelic" +) + +var showVersion bool + +func init() { + flag.BoolVar(&showVersion, "version", false, "show the version and exit") +} + +func main() { + flag.Usage = usage + flag.Parse() + + if showVersion { + version.PrintVersion() + return + } + + ctx := context.Background() + ctx = context.WithValue(ctx, "version", version.Version) + + config, err := resolveConfiguration() + if err != nil { + fatalf("configuration error: %v", err) + } + + ctx, err = configureLogging(ctx, config) + if err != nil { + fatalf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. 
+ uuid.Loggerf = context.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, *config) + handler := configureReporting(app) + handler = panicHandler(handler) + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + + if config.HTTP.Debug.Addr != "" { + go debugServer(config.HTTP.Debug.Addr) + } + + server := &http.Server{ + Handler: handler, + } + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + context.GetLogger(app).Fatalln(err) + } + defer ln.Close() + + if config.HTTP.TLS.Certificate != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"http/1.1"}, + Certificates: make([]tls.Certificate, 1), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + context.GetLogger(app).Fatalln(err) + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + context.GetLogger(app).Fatalln(err) + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + context.GetLogger(app).Infof("listening on %v, tls", ln.Addr()) + } else { + context.GetLogger(app).Infof("listening on %v", ln.Addr()) + } + + if err := server.Serve(ln); err != nil { + context.GetLogger(app).Fatalln(err) + } +} + +func usage() { + fmt.Fprintln(os.Stderr, "usage:", os.Args[0], "") + flag.PrintDefaults() +} + +func fatalf(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, format+"\n", args...) 
+ usage() + os.Exit(1) +} + +func resolveConfiguration() (*configuration.Configuration, error) { + var configurationPath string + + if flag.NArg() > 0 { + configurationPath = flag.Arg(0) + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. +func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + // log the application version with messages + ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. 
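+		// For example (illustrative, not part of the original patch), a config
+		// containing:
+		//
+		//	log:
+		//	  fields:
+		//	    service: registry
+		//
+		// makes every subsequent log line carry a "service=registry" field.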
+		var fields []interface{}
+		for k := range config.Log.Fields {
+			fields = append(fields, k)
+		}
+
+		ctx = context.WithValues(ctx, config.Log.Fields)
+		ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
+	}
+
+	return ctx, nil
+}
+
+func logLevel(level configuration.Loglevel) log.Level {
+	l, err := log.ParseLevel(string(level))
+	if err != nil {
+		l = log.InfoLevel
+		log.Warnf("error parsing level %q: %v, using %q ", level, err, l)
+	}
+
+	return l
+}
+
+// debugServer starts the debug server with pprof, expvar among other
+// endpoints. The addr should not be exposed externally. For most of these to
+// work, tls cannot be enabled on the endpoint, so it is generally separate.
+func debugServer(addr string) {
+	log.Infof("debug server listening %v", addr)
+	if err := http.ListenAndServe(addr, nil); err != nil {
+		log.Fatalf("error listening on debug interface: %v", err)
+	}
+}
+
+// panicHandler adds an HTTP handler to the web app that recovers from any
+// panic. logrus.Panic transmits the panic message to the preconfigured log
+// hooks, which are defined in config.yml.
+func panicHandler(handler http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer func() {
+			if err := recover(); err != nil {
+				log.Panic(fmt.Sprintf("%v", err))
+			}
+		}()
+		handler.ServeHTTP(w, r)
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go
new file mode 100644
index 00000000..e7ea770a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go
@@ -0,0 +1,5 @@
+// +build include_rados
+
+package main
+
+import _ "github.com/docker/distribution/registry/storage/driver/rados"
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go
new file mode 100644
index 00000000..502dab3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go
@@ -0,0 +1,486 @@
+package configuration
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
+// optionally modified by environment variables
+type Configuration struct {
+	// Version is the version which defines the format of the rest of the configuration
+	Version Version `yaml:"version"`
+
+	// Log supports setting various parameters related to the logging
+	// subsystem.
+	Log struct {
+		// Level is the granularity at which registry operations are logged.
+		Level Loglevel `yaml:"level"`
+
+		// Formatter overrides the default formatter with another. Options
+		// include "text", "json" and "logstash".
+		Formatter string `yaml:"formatter,omitempty"`
+
+		// Fields allows users to specify static string fields to include in
+		// the logger context.
+		Fields map[string]interface{} `yaml:"fields,omitempty"`
+
+		// Hooks allows users to configure the log hooks, enabling follow-up
+		// handling whenever messages are emitted at the configured levels.
+		Hooks []LogHook `yaml:"hooks,omitempty"`
+	}
+
+	// Loglevel is the level at which registry operations are logged. This is
+	// deprecated. Please use Log.Level in the future.
+	Loglevel Loglevel `yaml:"loglevel,omitempty"`
+
+	// Storage is the configuration for the registry's storage driver
+	Storage Storage `yaml:"storage"`
+
+	// Auth allows configuration of various authorization methods that may be
+	// used to gate requests.
+	Auth Auth `yaml:"auth,omitempty"`
+
+	// Middleware lists all middlewares to be used by the registry.
+	Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
+
+	// Reporting is the configuration for error reporting
+	Reporting Reporting `yaml:"reporting,omitempty"`
+
+	// HTTP contains configuration parameters for the registry's http
+	// interface.
+	HTTP struct {
+		// Addr specifies the bind address for the registry instance.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Net specifies the net portion of the bind address. A default empty value means tcp.
+		Net string `yaml:"net,omitempty"`
+
+		Prefix string `yaml:"prefix,omitempty"`
+
+		// Secret specifies the secret key with which HMAC tokens are created.
+		Secret string `yaml:"secret,omitempty"`
+
+		// TLS instructs the http server to listen with a TLS configuration.
+		// This only supports simple tls configuration with a cert and key.
+		// Mostly, this is useful for testing situations or simple deployments
+		// that require tls. If more complex configurations are required, use
+		// a proxy or make a proposal to add support here.
+		TLS struct {
+			// Certificate specifies the path to an x509 certificate file to
+			// be used for TLS.
+			Certificate string `yaml:"certificate,omitempty"`
+
+			// Key specifies the path to the x509 key file, which should
+			// contain the private portion for the file specified in
+			// Certificate.
+			Key string `yaml:"key,omitempty"`
+
+			// ClientCAs specifies the CA certs for client authentication.
+			// A file may contain multiple CA certificates encoded as PEM.
+			ClientCAs []string `yaml:"clientcas,omitempty"`
+		} `yaml:"tls,omitempty"`
+
+		// Debug configures the http debug interface, if specified. This can
+		// include services such as pprof, expvar and other data that should
+		// not be exposed externally. Left disabled by default.
+		Debug struct {
+			// Addr specifies the bind address for the debug server.
+			Addr string `yaml:"addr,omitempty"`
+		} `yaml:"debug,omitempty"`
+	} `yaml:"http,omitempty"`
+
+	// Notifications specifies configuration about the various endpoints to
+	// which registry events are dispatched.
+	Notifications Notifications `yaml:"notifications,omitempty"`
+
+	// Redis configures the redis pool available to the registry webapp.
+	Redis struct {
+		// Addr specifies the redis instance available to the application.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Password string to use when making a connection.
+		Password string `yaml:"password,omitempty"`
+
+		// DB specifies the database to connect to on the redis instance.
+		DB int `yaml:"db,omitempty"`
+
+		DialTimeout  time.Duration `yaml:"dialtimeout,omitempty"`  // timeout for connect
+		ReadTimeout  time.Duration `yaml:"readtimeout,omitempty"`  // timeout for reads of data
+		WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
+
+		// Pool configures the behavior of the redis connection pool.
+		Pool struct {
+			// MaxIdle sets the maximum number of idle connections.
+			MaxIdle int `yaml:"maxidle,omitempty"`
+
+			// MaxActive sets the maximum number of connections that should be
+			// opened before blocking a connection request.
+			MaxActive int `yaml:"maxactive,omitempty"`
+
+			// IdleTimeout sets the amount of time to wait before closing
+			// inactive connections.
+			IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
+		} `yaml:"pool,omitempty"`
+	} `yaml:"redis,omitempty"`
+
+	Proxy Proxy `yaml:"proxy,omitempty"`
+}
+
+// LogHook is composed of hook Level and Type.
+// Once hooks are configured, the follow-up handling runs automatically
+// whenever a log message is emitted at one of the configured levels.
+// Example: a hook can send an email notification when an error is logged in the app.
+type LogHook struct {
+	// Disabled lets the user choose whether or not to enable the hook.
+	Disabled bool `yaml:"disabled,omitempty"`
+
+	// Type allows the user to select which type of hook handler they want.
+	Type string `yaml:"type,omitempty"`
+
+	// Levels sets the levels of log message that cause the hook to be executed.
+	Levels []string `yaml:"levels,omitempty"`
+
+	// MailOptions allows the user to configure email parameters.
+	MailOptions MailOptions `yaml:"options,omitempty"`
+}
+
+// MailOptions provides the configuration section for a specific handler.
+type MailOptions struct {
+	SMTP struct {
+		// Addr defines the smtp host address
+		Addr string `yaml:"addr,omitempty"`
+
+		// Username defines the user name for the smtp host
+		Username string `yaml:"username,omitempty"`
+
+		// Password defines the password of the login user
+		Password string `yaml:"password,omitempty"`
+
+		// Insecure defines whether the smtp login skips certificate verification.
+		Insecure bool `yaml:"insecure,omitempty"`
+	} `yaml:"smtp,omitempty"`
+
+	// From defines the mail sending address
+	From string `yaml:"from,omitempty"`
+
+	// To defines the mail receiving addresses
+	To []string `yaml:"to,omitempty"`
+}
+
+// v0_1Configuration is a Version 0.1 Configuration struct
+// This is currently aliased to Configuration, as it is the current version
+type v0_1Configuration Configuration
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints
+func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var versionString string
+	err := unmarshal(&versionString)
+	if err != nil {
+		return err
+	}
+
+	newVersion := Version(versionString)
+	if _, err := newVersion.major(); err != nil {
+		return err
+	}
+
+	if _, err := newVersion.minor(); err != nil {
+		return err
+	}
+
+	*version = newVersion
+	return nil
+}
+
+// CurrentVersion is the most recent Version that can be parsed
+var CurrentVersion = MajorMinorVersion(0, 1)
+
+// Loglevel is the level at which operations are logged
+// This can be error, warn, info, or debug
+type Loglevel string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
+// valid loglevel
+func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var loglevelString string
+	err := unmarshal(&loglevelString)
+	if err != nil {
+		return err
+	}
+
+	loglevelString = strings.ToLower(loglevelString)
+	switch loglevelString {
+	case "error", "warn", "info", "debug":
+	default:
+		return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
+	}
+
+	*loglevel = Loglevel(loglevelString)
+	return nil
+}
+
+// Parameters defines a key-value parameters mapping
+type Parameters map[string]interface{}
+
+// Storage defines the configuration for registry object storage
+type Storage map[string]Parameters
+
+// Type returns
the storage driver type, such as filesystem or s3
+func (storage Storage) Type() string {
+	// Return the only key in this map
+	for k := range storage {
+		switch k {
+		case "maintenance":
+			// allow configuration of maintenance
+		case "cache":
+			// allow configuration of caching
+		case "delete":
+			// allow configuration of delete
+		case "redirect":
+			// allow configuration of redirect
+		default:
+			return k
+		}
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for a Storage configuration
+func (storage Storage) Parameters() Parameters {
+	return storage[storage.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (storage Storage) setParameter(key string, value interface{}) {
+	storage[storage.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
+func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var storageMap map[string]Parameters
+	err := unmarshal(&storageMap)
+	if err == nil {
+		if len(storageMap) > 1 {
+			types := make([]string, 0, len(storageMap))
+			for k := range storageMap {
+				switch k {
+				case "maintenance":
+					// allow for configuration of maintenance
+				case "cache":
+					// allow configuration of caching
+				case "delete":
+					// allow configuration of delete
+				case "redirect":
+					// allow configuration of redirect
+				default:
+					types = append(types, k)
+				}
+			}
+
+			if len(types) > 1 {
+				return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
+			}
+		}
+		*storage = storageMap
+		return nil
+	}
+
+	var storageType string
+	err = unmarshal(&storageType)
+	if err == nil {
+		*storage = Storage{storageType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
+func (storage Storage) MarshalYAML() (interface{}, error) {
+	if storage.Parameters() == nil {
+		return storage.Type(), nil
+	}
+	return map[string]Parameters(storage), nil
+}
+
+// Auth defines the configuration for registry authorization.
+type Auth map[string]Parameters
+
+// Type returns the auth type, such as htpasswd or token
+func (auth Auth) Type() string {
+	// Return the only key in this map
+	for k := range auth {
+		return k
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for an Auth configuration
+func (auth Auth) Parameters() Parameters {
+	return auth[auth.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (auth Auth) setParameter(key string, value interface{}) {
+	auth[auth.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
+func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]Parameters
+	err := unmarshal(&m)
+	if err == nil {
+		if len(m) > 1 {
+			types := make([]string, 0, len(m))
+			for k := range m {
+				types = append(types, k)
+			}
+
+			// TODO(stevvooe): May want to change this slightly for
+			// authorization to allow multiple challenges.
+			return fmt.Errorf("must provide exactly one type.
Provided: %v", types) + + } + *auth = m + return nil + } + + var authType string + err = unmarshal(&authType) + if err == nil { + *auth = Auth{authType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (auth Auth) MarshalYAML() (interface{}, error) { + if auth.Parameters() == nil { + return auth.Type(), nil + } + return map[string]Parameters(auth), nil +} + +// Notifications configures multiple http endpoints. +type Notifications struct { + // Endpoints is a list of http configurations for endpoints that + // respond to webhook notifications. In the future, we may allow other + // kinds of endpoints, such as external queues. + Endpoints []Endpoint `yaml:"endpoints,omitempty"` +} + +// Endpoint describes the configuration of an http webhook notification +// endpoint. +type Endpoint struct { + Name string `yaml:"name"` // identifies the endpoint in the registry instance. + Disabled bool `yaml:"disabled"` // disables the endpoint + URL string `yaml:"url"` // post url for the endpoint. + Headers http.Header `yaml:"headers"` // static headers that should be added to all requests + Timeout time.Duration `yaml:"timeout"` // HTTP timeout + Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure + Backoff time.Duration `yaml:"backoff"` // backoff duration +} + +// Reporting defines error reporting methods. +type Reporting struct { + // Bugsnag configures error reporting for Bugsnag (bugsnag.com). + Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` + // NewRelic configures error reporting for NewRelic (newrelic.com) + NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` +} + +// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). +type BugsnagReporting struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey,omitempty"` + // ReleaseStage tracks where the registry is deployed. + // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage,omitempty"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint,omitempty"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey,omitempty"` + // Name is the component name of the registry in NewRelic + Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` +} + +// Middleware configures named middlewares to be applied at injection points. 
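+// An illustrative configuration fragment (assumed, not taken from this patch;
+// the cloudfront middleware name and its baseurl option are assumptions):
+//
+//	middleware:
+//	  storage:
+//	    - name: cloudfront
+//	      options:
+//	        baseurl: https://my-distribution.example.net/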
+type Middleware struct { + // Name the middleware registers itself as + Name string `yaml:"name"` + // Flag to disable middleware easily + Disabled bool `yaml:"disabled,omitempty"` + // Map of parameters that will be passed to the middleware's initialization function + Options Parameters `yaml:"options"` +} + +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + +// Parse parses an input configuration yaml document into a Configuration struct +// This should generally be capable of handling old configuration format versions +// +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Loglevel == Loglevel("") { + v0_1.Loglevel = Loglevel("info") + } + if v0_1.Storage.Type() == "" { + return nil, fmt.Errorf("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) + + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go new file mode 100644 index 00000000..24076e2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go @@ -0,0 +1,370 @@ +package configuration + +import ( + "bytes" + "net/http" + "os" + "testing" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +// Hook up gocheck into the "go test" runner +func Test(t *testing.T) { TestingT(t) } + +// configStruct is a canonical example configuration, which should map to configYamlV0_1 +var configStruct = Configuration{ + Version: "0.1", + Log: struct { + Level Loglevel `yaml:"level"` + Formatter string `yaml:"formatter,omitempty"` + Fields map[string]interface{} `yaml:"fields,omitempty"` + Hooks []LogHook `yaml:"hooks,omitempty"` + }{ + Fields: map[string]interface{}{"environment": "test"}, + }, + Loglevel: "info", + Storage: Storage{ + "s3": Parameters{ + "region": "us-east-1", + "bucket": "my-bucket", + "rootdirectory": "/registry", + "encrypt": true, + "secure": false, + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": nil, + "port": 42, + }, + }, + Auth: Auth{ + "silly": Parameters{ + "realm": "silly", + "service": "silly", + }, + }, + Reporting: Reporting{ + Bugsnag: BugsnagReporting{ + APIKey: "BugsnagApiKey", + }, + }, + Notifications: Notifications{ + Endpoints: []Endpoint{ + { + Name: "endpoint-1", + URL: "http://example.com", + Headers: http.Header{ + "Authorization": []string{"Bearer "}, + }, + }, + }, + }, + HTTP: struct { + Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + TLS struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + } `yaml:"tls,omitempty"` + Debug struct { + Addr string `yaml:"addr,omitempty"` + } `yaml:"debug,omitempty"` + }{ + TLS: struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + }{ + ClientCAs: []string{"/path/to/ca.pem"}, + }, + }, +} + +// configYamlV0_1 is a Version 0.1 yaml document representing configStruct +var configYamlV0_1 = ` +version: 0.1 +log: + fields: + environment: test +loglevel: info +storage: + s3: + region: us-east-1 + bucket: my-bucket + rootdirectory: /registry + encrypt: true + secure: false + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: ~ + port: 42 +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] +reporting: + bugsnag: + apikey: BugsnagApiKey +http: + clientcas: + - /path/to/ca.pem +` + +// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory +// storage driver with no parameters +var inmemoryConfigYamlV0_1 = ` +version: 0.1 +loglevel: info +storage: inmemory +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] +` + +type ConfigSuite struct { + expectedConfig *Configuration +} + +var _ = Suite(new(ConfigSuite)) + +func (suite *ConfigSuite) SetUpTest(c *C) { + os.Clearenv() + suite.expectedConfig = copyConfig(configStruct) +} + +// TestMarshalRoundtrip validates that configStruct can be marshaled and +// unmarshaled without changing any parameters +func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + config, err := Parse(bytes.NewReader(configBytes)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseSimple validates that configYamlV0_1 can be parsed into a struct +// matching configStruct +func 
(suite *ConfigSuite) TestParseSimple(c *C) { + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInmemory validates that configuration yaml with storage provided as +// a string can be parsed into a Configuration struct with no storage parameters +func (suite *ConfigSuite) TestParseInmemory(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Log.Fields = nil + + config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseIncomplete validates that an incomplete yaml configuration cannot +// be parsed without providing environment variables to fill in the missing +// components. +func (suite *ConfigSuite) TestParseIncomplete(c *C) { + incompleteConfigYaml := "version: 0.1" + _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, NotNil) + + suite.expectedConfig.Log.Fields = nil + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} + suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Notifications = Notifications{} + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + os.Setenv("REGISTRY_AUTH", "silly") + os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") + + config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvStorage validates that providing environment variables +// that match the given storage type will only include environment-defined +// parameters and remove yaml-defined parameters +func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { + suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} + + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change +// and add to the given storage parameters will change and add parameters to the parsed +// Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { + suite.expectedConfig.Storage.setParameter("region", "us-west-1") + suite.expectedConfig.Storage.setParameter("secure", true) + suite.expectedConfig.Storage.setParameter("newparam", "some Value") + + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") + os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") + os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageType validates that providing an environment variable that +// changes the storage type will be reflected in the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + + os.Setenv("REGISTRY_STORAGE", "inmemory") + + config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithExtraneousEnvStorageParams validates that environment variables +// that change parameters out of the scope of the specified storage type are +// ignored. +func (suite *ConfigSuite) TestParseWithExtraneousEnvStorageParams(c *C) { + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable +// that changes the storage type will be reflected in the parsed Configuration struct and that +// environment storage parameters will also be included +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} + suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log +// level to the same as the one provided in the yaml will not change the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { + os.Setenv("REGISTRY_LOGLEVEL", "info") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the +// log level will override the value provided in the yaml document +func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { + suite.expectedConfig.Loglevel = "error" + + os.Setenv("REGISTRY_LOGLEVEL", "error") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidLoglevel validates that the parser will fail to parse a +// configuration if the loglevel is malformed +func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { + invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" + _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) + c.Assert(err, NotNil) + + os.Setenv("REGISTRY_LOGLEVEL", "derp") + + _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) + +} + +// TestParseWithDifferentEnvReporting validates that environment variables +// properly override reporting parameters +func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { + suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" + suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" + + os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + + config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration +// version than the CurrentVersion +func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { + suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + _, err = Parse(bytes.NewReader(configBytes)) + c.Assert(err, NotNil) +} + +func copyConfig(config Configuration) *Configuration { + configCopy := new(Configuration) + + configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) + configCopy.Loglevel = config.Loglevel + configCopy.Log = config.Log + configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) + for k, v := range config.Log.Fields { + configCopy.Log.Fields[k] = v + } + + configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} + for k, v := range config.Storage.Parameters() { + configCopy.Storage.setParameter(k, v) + } + configCopy.Reporting = Reporting{ + Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, + NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, + } + + configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} + for k, v := range config.Auth.Parameters() { + configCopy.Auth.setParameter(k, v) + } + + configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} + for _, v := range config.Notifications.Endpoints { + configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) + } + + return configCopy +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go new file mode 100644 index 00000000..10a0461e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go @@ -0,0 +1,203 @@ +package configuration + +import ( + "fmt" + "os" + "reflect" + "regexp" + "strconv" + "strings" + + "gopkg.in/yaml.v2" +) + +// Version is a major/minor version pair of the form Major.Minor +// Major version upgrades indicate structure or type changes +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) +} + +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err +} + +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major +} + +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} + +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return minor +} + +// VersionedParseInfo defines how a specific version of a configuration should +// be parsed into the current version +type VersionedParseInfo struct { + // Version is the version which this 
parsing information relates to + Version Version + // ParseAs defines the type which a configuration file of this version + // should be parsed into + ParseAs reflect.Type + // ConversionFunc defines a method for converting the parsed configuration + // (of type ParseAs) into the current configuration version + // Note: this method signature is very unclear with the absence of generics + ConversionFunc func(interface{}) (interface{}, error) +} + +// Parser can be used to parse a configuration file and environment of a defined +// version into a unified output structure +type Parser struct { + prefix string + mapping map[Version]VersionedParseInfo + env map[string]string +} + +// NewParser returns a *Parser with the given environment prefix which handles +// versioned configurations which match the given parseInfos +func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { + p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo), env: make(map[string]string)} + + for _, parseInfo := range parseInfos { + p.mapping[parseInfo.Version] = parseInfo + } + + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + p.env[envParts[0]] = envParts[1] + } + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + err = p.overwriteFields(parseAs, p.prefix) + if err != nil { + return err + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +func (p *Parser) overwriteFields(v reflect.Value, prefix string) error { + for v.Kind() == reflect.Ptr { + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + fieldPrefix := strings.ToUpper(prefix + "_" + sf.Name) + if e, ok := p.env[fieldPrefix]; ok { + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(e), fieldVal.Interface()) + if err != nil { + return err + } + v.Field(i).Set(reflect.Indirect(fieldVal)) + } + err := p.overwriteFields(v.Field(i), fieldPrefix) + if err != nil { + return err + } + } + case reflect.Map: + p.overwriteMap(v, prefix) + } + return nil +} + +func (p *Parser) overwriteMap(m reflect.Value, prefix string) error { + switch m.Type().Elem().Kind() { + case reflect.Struct: + for _, k := range m.MapKeys() { + err := p.overwriteFields(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) + if err != nil { + return err + } + } + envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) + if err != nil { + return err + } + for key, val := range p.env { + if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { + 
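+				// Illustrative (assumed): an environment variable named
+				// <PREFIX>_SOMEKEY matches here with submatches[1] == "SOMEKEY",
+				// and its YAML-parsed value is stored under the lowercased
+				// map key "somekey".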
mapValue := reflect.New(m.Type().Elem()) + err := yaml.Unmarshal([]byte(val), mapValue.Interface()) + if err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) + } + } + case reflect.Map: + for _, k := range m.MapKeys() { + err := p.overwriteMap(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) + if err != nil { + return err + } + } + default: + envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) + if err != nil { + return err + } + + for key, val := range p.env { + if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { + mapValue := reflect.New(m.Type().Elem()) + err := yaml.Unmarshal([]byte(val), mapValue.Interface()) + if err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) + } + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go new file mode 100644 index 00000000..23cbf5b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go @@ -0,0 +1,85 @@ +package context + +import ( + "sync" + + "github.com/docker/distribution/uuid" + "golang.org/x/net/context" +) + +// Context is a copy of Context from the golang.org/x/net/context package. +type Context interface { + context.Context +} + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() Context { + return background +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. Use context Values only for request-scoped data that transits processes +// and APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. 
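+	// Minimal usage sketch (assumed, not part of the original patch):
+	//
+	//	ctx := WithValues(Background(), map[string]interface{}{"version": "v2.0"})
+	//	ctx.Value("version") // yields "v2.0"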
+	for k, v := range m {
+		mo[k] = v
+	}
+
+	return stringMapContext{
+		Context: ctx,
+		m:       mo,
+	}
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+	if ks, ok := key.(string); ok {
+		if v, ok := smc.m[ks]; ok {
+			return v
+		}
+	}
+
+	return smc.Context.Value(key)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go
new file mode 100644
index 00000000..a63989e5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go
@@ -0,0 +1,76 @@
+// Package context provides several utilities for working with
+// golang.org/x/net/context in http requests. Primarily, the focus is on
+// logging relevant request information but this package is not limited to
+// that purpose.
+//
+// Logging
+//
+// The most useful aspect of this package is GetLogger. This function takes
+// any context.Context interface and returns the current logger from the
+// context. Canonical usage looks like this:
+//
+// 	GetLogger(ctx).Infof("something interesting happened")
+//
+// GetLogger also takes optional key arguments. The keys will be looked up in
+// the context and reported with the logger. The following example would
+// return a logger that prints the version with each log message:
+//
+// 	ctx := context.WithValue(context.Background(), "version", version)
+// 	GetLogger(ctx, "version").Infof("this log message has a version field")
+//
+// The above would print out a log message like this:
+//
+// 	INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
+//
+// When used with WithLogger, we gain the ability to decorate the context with
+// loggers that have information from disparate parts of the call stack.
+// Following from the version example, we can build a new context with the
+// configured logger such that we always print the version field:
+//
+// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//
+// Since the logger has been pushed to the context, we can now get the version
+// field for free with our log messages. Future calls to GetLogger on the new
+// context will have the version field:
+//
+// 	GetLogger(ctx).Infof("this log message has a version field")
+//
+// This becomes more powerful when we start stacking loggers. Let's say we
+// have the version logger from above but also want a request id. Using the
+// context above, in our request scoped function, we place another logger in
+// the context:
+//
+// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
+// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
+//
+// When GetLogger is called on the new context, "http.request.id" will be
+// included as a logger field, along with the original "version" field:
+//
+// 	INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
+//
+// Note that this only affects the new context; the previous context, with the
+// version field, can be used independently. Put another way, the new logger,
+// added to the request context, is unique to that context and can have
+// request scoped variables.
+//
+// HTTP Requests
+//
+// This package also contains several methods for working with http requests.
+// The concepts are very similar to those described above. We simply place the
+// request in the context using WithRequest. This makes the request variables
+// available.
GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go new file mode 100644 index 00000000..f61e3bc2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go @@ -0,0 +1,342 @@ +package context + +import ( + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx Context, r *http.Request) Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. 
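+// Minimal usage sketch (assumed, not part of the original patch):
+//
+//	r, err := GetRequest(ctx)
+//	if err != nil {
+//		// no *http.Request was attached to the context via WithRequest
+//	}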
+func GetRequest(ctx Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// error is return if it is not available on the context. +func GetRequestID(ctx Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + closeNotifier, ok := w.(http.CloseNotifier) + if !ok { + panic("the ResponseWriter does not implement CloseNotifier") + } + irw := &instrumentedResponseWriter{ + ResponseWriter: w, + CloseNotifier: closeNotifier, + Context: ctx, + } + + return irw, irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx Context, r *http.Request) Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. +func GetRequestLogger(ctx Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". 
For example, r.RequestURI +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriter provides response writer information in a +// context. +type instrumentedResponseWriter struct { + http.ResponseWriter + http.CloseNotifier + Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. + if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go new file mode 100644 index 00000000..ae88a314 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go @@ -0,0 +1,292 @@ +package context + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = "GET" + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := 
WithRequest(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + { + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(testcase.key) + + if v == nil { + t.Fatalf("value not found for %q", testcase.key) + } + + if testcase.expected != nil && v != testcase.expected { + t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) + } + + // Key specific checks! + switch testcase.key { + case "http.request.id": + if _, ok := v.(string); !ok { + t.Fatalf("request id not a string: %v", v) + } + case "http.request.startedat": + vt, ok := v.(time.Time) + if !ok { + t.Fatalf("value not a time: %v", v) + } + + now := time.Now() + if vt.After(now) { + t.Fatalf("time generated too late: %v > %v", vt, now) + } + + if vt.Before(start) { + t.Fatalf("time generated too early: %v < %v", vt, start) + } + } + } +} + +type testResponseWriter struct { + flushed bool + status int + written int64 + header http.Header +} + +func (trw *testResponseWriter) Header() http.Header { + if trw.header == nil { + trw.header = make(http.Header) + } + + return trw.header +} + +// CloseNotify is only here to make the testResponseWriter implement the +// http.CloseNotifier interface, which WithResponseWriter expects to be +// implemented. +func (trw *testResponseWriter) CloseNotify() <-chan bool { + return make(chan bool) +} + +func (trw *testResponseWriter) Write(p []byte) (n int, err error) { + if trw.status == 0 { + trw.status = http.StatusOK + } + + n = len(p) + trw.written += int64(n) + return +} + +func (trw *testResponseWriter) WriteHeader(status int) { + trw.status = status +} + +func (trw *testResponseWriter) Flush() { + trw.flushed = true +} + +func TestWithResponseWriter(t *testing.T) { + trw := testResponseWriter{} + ctx, rw := WithResponseWriter(Background(), &trw) + + if ctx.Value("http.response") != rw { + t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) + } + + grw, err := GetResponseWriter(ctx) + if err != nil { + t.Fatalf("error getting response writer: %v", err) + } + + if grw != rw { + t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) + } + + if ctx.Value("http.response.status") != 0 { + t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) + } + + if n, err := rw.Write(make([]byte, 1024)); err != nil { + t.Fatalf("unexpected error writing: %v", err) + } else if n != 1024 { + t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) + } + + if ctx.Value("http.response.status") != http.StatusOK { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) + } + + if ctx.Value("http.response.written") != int64(1024) { + t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) + } + + // Make sure flush propagates + rw.(http.Flusher).Flush() + + if !trw.flushed { + t.Fatalf("response writer not flushed") + } + + // Write another status and make sure 
context is correct. This normally + // wouldn't work except for in this contrived testcase. + rw.WriteHeader(http.StatusBadRequest) + + if ctx.Value("http.response.status") != http.StatusBadRequest { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) + } +} + +func TestWithVars(t *testing.T) { + var req http.Request + vars := map[string]string{ + "foo": "asdf", + "bar": "qwer", + } + + getVarsFromRequest = func(r *http.Request) map[string]string { + if r != &req { + t.Fatalf("unexpected request: %v != %v", r, req) + } + + return vars + } + + ctx := WithVars(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "vars", + expected: vars, + }, + { + key: "vars.foo", + expected: "asdf", + }, + { + key: "vars.bar", + expected: "qwer", + }, + } { + v := ctx.Value(testcase.key) + + if !reflect.DeepEqual(v, testcase.expected) { + t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) + } + } +} + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing. +func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(200) + })) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest("GET", frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + _, err = http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest("GET", backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go new file mode 100644 index 00000000..78e4212a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go @@ -0,0 +1,101 @@ +package context + +import ( + "fmt" + + "github.com/Sirupsen/logrus" +) + +// Logger provides a leveled-logging interface. 
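+// It is intentionally a subset of the *logrus.Entry method set, so a
+// *logrus.Entry (which is what the getters below return) satisfies it
+// directly.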
+type Logger interface {
+	// standard logger methods
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+
+	Panic(args ...interface{})
+	Panicf(format string, args ...interface{})
+	Panicln(args ...interface{})
+
+	// Leveled methods, from logrus
+	Debug(args ...interface{})
+	Debugf(format string, args ...interface{})
+	Debugln(args ...interface{})
+
+	Error(args ...interface{})
+	Errorf(format string, args ...interface{})
+	Errorln(args ...interface{})
+
+	Info(args ...interface{})
+	Infof(format string, args ...interface{})
+	Infoln(args ...interface{})
+
+	Warn(args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warnln(args ...interface{})
+}
+
+// WithLogger creates a new context with the provided logger.
+func WithLogger(ctx Context, logger Logger) Context {
+	return WithValue(ctx, "logger", logger)
+}
+
+// GetLoggerWithField returns a logger instance with the specified field key
+// and value without affecting the context. Extra specified keys will be
+// resolved from the context.
+func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
+	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
+}
+
+// GetLoggerWithFields returns a logger instance with the specified fields
+// without affecting the context. Extra specified keys will be resolved from
+// the context.
+func GetLoggerWithFields(ctx Context, fields map[string]interface{}, keys ...interface{}) Logger {
+	return getLogrusLogger(ctx, keys...).WithFields(logrus.Fields(fields))
+}
+
+// GetLogger returns the logger from the current context, if present. If one
+// or more keys are provided, they will be resolved on the context and
+// included in the logger. While context.Value takes an interface, any key
+// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
+// a logging key field. If context keys are integer constants, for example,
+// it's recommended that a String method is implemented.
+func GetLogger(ctx Context, keys ...interface{}) Logger {
+	return getLogrusLogger(ctx, keys...)
+}
+
+// getLogrusLogger returns the logrus logger for the context. If one or more
+// keys are provided, they will be resolved on the context and included in
+// the logger. Only use this function if specific logrus functionality is
+// required.
+func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
+	var logger *logrus.Entry
+
+	// Get a logger, if it is present.
+	loggerInterface := ctx.Value("logger")
+	if loggerInterface != nil {
+		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
+			logger = lgr
+		}
+	}
+
+	if logger == nil {
+		// If no logger is found, just return the standard logger.
+		logger = logrus.NewEntry(logrus.StandardLogger())
+	}
+
+	fields := logrus.Fields{}
+
+	for _, key := range keys {
+		v := ctx.Value(key)
+		if v != nil {
+			fields[fmt.Sprint(key)] = v
+		}
+	}
+
+	return logger.WithFields(fields)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go
new file mode 100644
index 00000000..af4f1351
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go
@@ -0,0 +1,104 @@
+package context
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/docker/distribution/uuid"
+)
+
+// WithTrace allocates a traced timing span in a new context.
This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapased time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go new file mode 100644 index 00000000..4b969fbb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. 
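+	// resolve the caller's *runtime.Func; its Name() is the expected
+	// "trace.func" value asserted below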
+ f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + ctx, done := WithTrace(ctx) + defer done("this should be subordinate to the other trace") + time.Sleep(time.Second) + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + })) + } + traced() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} + +func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { + + for _, testcase := range values { + v := ctx.Value(testcase.key) + if testcase.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) + } + continue + } + + if v != testcase.expected { + t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go new file mode 100644 index 00000000..c0aff00d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go @@ -0,0 +1,32 @@ +package context + +import ( + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx Context, key interface{}) time.Duration { + startedAtI := ctx.Value(key) + if startedAtI != nil { + if startedAt, ok := startedAtI.(time.Time); ok { + return time.Since(startedAt) + } + } + + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. 
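+//
+// For example, the request id placed on the context by WithRequest can be
+// read back as:
+//
+//	id := GetStringValue(ctx, "http.request.id")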
+func GetStringValue(ctx Context, key string) (value string) {
+	stringi := ctx.Value(key)
+	if stringi != nil {
+		if valuev, ok := stringi.(string); ok {
+			value = valuev
+		}
+	}
+
+	return value
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD
new file mode 100644
index 00000000..f7e14b5b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD
@@ -0,0 +1,36 @@
+# Apache HTTPd sample for Registry v1, v2 and mirror
+
+3 containers are involved:
+
+* Docker Registry v1 (registry 0.9.1)
+* Docker Registry v2 (registry 2.0.0)
+* Docker Registry v1 in mirror mode
+
+HTTP for the mirror and HTTPS for v1 & v2:
+
+* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode
+* https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode
+
+## 3 Docker containers should be started
+
+* Docker Registry 1.0 in Mirror mode : port 5001
+* Docker Registry 1.0 in Hosting mode : port 5000
+* Docker Registry 2.0 in Hosting mode : port 5002
+
+### Registry v1
+
+    docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1
+
+### Mirror
+
+    docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \
+     -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1
+
+### Registry v2
+
+    docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2.0
+
+# For Hosting mode access
+
+* users must have an account (valid-user) to be able to fetch images
+* only users using the docker-deployer account are allowed to push images
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf
new file mode 100644
index 00000000..3300a7c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf
@@ -0,0 +1,127 @@
+#
+# Sample Apache 2.x configuration where :
+#
+
+<VirtualHost *:80>
+
+  ServerName registry.example.com
+  ServerAlias www.registry.example.com
+
+  ProxyRequests off
+  ProxyPreserveHost on
+
+  # no proxy for /error/ (Apache HTTPd error messages)
+  ProxyPass /error/ !
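+  # (the trailing "!" tells mod_proxy to serve this path itself instead of
+  # forwarding it to the backend)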
+ + ProxyPass /_ping http://localhost:5001/_ping + ProxyPassReverse /_ping http://localhost:5001/_ping + + ProxyPass /v1 http://localhost:5001/v1 + ProxyPassReverse /v1 http://localhost:5001/v1 + + # Logs + ErrorLog ${APACHE_LOG_DIR}/mirror_error_log + CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog + + + + + + + ServerName registry.example.com + ServerAlias www.registry.example.com + + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt + SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key + + # Higher Strength SSL Ciphers + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + SSLCipherSuite RC4-SHA:HIGH + SSLHonorCipherOrder on + + # Logs + ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log + CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog + + Header always set "Docker-Distribution-Api-Version" "registry/2.0" + Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" + RequestHeader set X-Forwarded-Proto "https" + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + # + # Registry v1 + # + + ProxyPass /v1 http://localhost:5000/v1 + ProxyPassReverse /v1 http://localhost:5000/v1 + + ProxyPass /_ping http://localhost:5000/_ping + ProxyPassReverse /_ping http://localhost:5000/_ping + + # Authentication require for push + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer account only + + Require user docker-deployer + + + + + # Allow ping to run unauthenticated. + + Satisfy any + Allow from all + + + # Allow ping to run unauthenticated. + + Satisfy any + Allow from all + + + # + # Registry v2 + # + + ProxyPass /v2 http://localhost:5002/v2 + ProxyPassReverse /v2 http://localhost:5002/v2 + + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer only + + Require user docker-deployer + + + + + + + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh new file mode 100644 index 00000000..d907cf5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh @@ -0,0 +1,119 @@ +#! 
/bin/bash +# +# Ceph cluster setup in Circle CI +# + +set -x +set -e +set -u + +NODE=$(hostname) +CEPHDIR=/tmp/ceph + +mkdir cluster +pushd cluster + +# Install +retries=0 +until [ $retries -ge 5 ]; do + pip install ceph-deploy && break + retries=$[$retries+1] + sleep 30 +done + +retries=0 +until [ $retries -ge 5 ]; do + ceph-deploy install --release hammer $NODE && break + retries=$[$retries+1] + sleep 30 +done + +retries=0 +until [ $retries -ge 5 ]; do + ceph-deploy pkg --install librados-dev $NODE && break + retries=$[$retries+1] + sleep 30 +done + +echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys +ssh-keyscan $NODE >> ~/.ssh/known_hosts +ceph-deploy new $NODE + +cat >> ceph.conf < 74acc70fa106 + Removing intermediate container edb84c2b40cb + Successfully built 74acc70fa106 + + The commmand outputs its progress until it completes. + +4. Start your configuration with compose. + + $ docker-compose up + Recreating compose_registryv1_1... + Recreating compose_registryv2_1... + Recreating compose_nginx_1... + Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 + ... + + +5. In another terminal, display the running configuration. + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 + 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 + aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 + +### Explore a bit + +1. Check for TLS on your `nginx` server. + + $ curl -v https://localhost:5000 + * Rebuilt URL to: https://localhost:5000/ + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... + * Connected to localhost (127.0.0.1) port 5000 (#0) + * successfully set certificate verify locations: + * CAfile: none + CApath: /etc/ssl/certs + * SSLv3, TLS handshake, Client hello (1): + * SSLv3, TLS handshake, Server hello (2): + * SSLv3, TLS handshake, CERT (11): + * SSLv3, TLS alert, Server hello (2): + * SSL certificate problem: self signed certificate + * Closing connection 0 + curl: (60) SSL certificate problem: self signed certificate + More details here: http://curl.haxx.se/docs/sslcerts.html + +2. Tag the `v1` registry image. + + $ docker tag registry:latest localhost:5000/registry_one:latest + +2. Push it to the localhost. + + $ docker push localhost:5000/registry_one:latest + + If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. + +4. Use `curl` to list the image in the registry. + + $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... + * Connected to localhost (127.0.0.1) port 32777 (#0) + > GET /v2/registry1/tags/list HTTP/1.1 + > User-Agent: curl/7.36.0 + > Host: localhost:32777 + > Accept: */* + > + < HTTP/1.1 200 OK + < Content-Type: application/json; charset=utf-8 + < Docker-Distribution-Api-Version: registry/2.0 + < Date: Tue, 14 Apr 2015 22:34:13 GMT + < Content-Length: 39 + < + {"name":"registry1","tags":["latest"]} + * Connection #0 to host localhost left intact + + This example refers to the specific port assigned to the 2.0 registry. You saw + this port earlier, when you used `docker ps` to show your running containers. 
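+
+    If the dynamically assigned port differs on your machine, you can ask
+    Compose for the current mapping (a quick check, using the `registryv2`
+    service name from the compose file that follows):
+
+        $ docker-compose port registryv2 5000
+        0.0.0.0:32777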
+ + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml new file mode 100644 index 00000000..5cd04858 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml @@ -0,0 +1,15 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile new file mode 100644 index 00000000..2b252ec7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile @@ -0,0 +1,6 @@ +FROM nginx:1.7 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf new file mode 100644 index 00000000..65c4d776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf new file mode 100644 index 00000000..7b039a54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf new file mode 100644 index 00000000..63cd180d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; 
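+
+    # the proxy rules for both registry versions live in conf.d/registry.conf,
+    # which the Dockerfile above copies into place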
+} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf new file mode 100644 index 00000000..47ffd237 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf @@ -0,0 +1,41 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting plus add_header + # auth_basic "registry.localhost"; + # auth_basic_user_file test.password; + # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile new file mode 100644 index 00000000..6061e99e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile @@ -0,0 +1,46 @@ +FROM debian:jessie + +MAINTAINER Docker Distribution Team + +# compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + # For DIND + ca-certificates \ + curl \ + iptables \ + procps \ + e2fsprogs \ + xz-utils \ + # For build + build-essential \ + file \ + git \ + net-tools \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install Docker +ENV VERSION 1.7.1 +RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ + && chmod +x /usr/local/bin/docker + +# Install DIND +RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/master/hack/dind \ + && chmod +x /dind + +# Install bats +RUN cd /usr/local/src/ \ + && git clone https://github.com/sstephenson/bats.git \ + && cd bats \ + && ./install.sh /usr/local + +# Install docker-compose +RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ + && chmod +x /usr/local/bin/docker-compose + +RUN mkdir -p /go/src/github.com/docker/distribution +WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration + +VOLUME /var/lib/docker + +ENTRYPOINT ["/dind"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md new file mode 100644 index 00000000..e12bec1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md @@ -0,0 +1,138 @@ +# Docker Registry Integration Testing + +These integration tests cover interactions 
between the Docker daemon and the +registry server. All tests are run using the docker cli. + +The compose configuration is intended to setup a testing environment for Docker +using multiple registry configurations. These configurations include different +combinations of a v1 and v2 registry as well as TLS configurations. + +## Running inside of Docker +### Get integration container +The container image to run the integation tests will need to be pulled or built +locally. + +*Building locally* +``` +$ docker build -t distribution/docker-integration . +``` + +### Run script + +Invoke the tests within Docker through the `run.sh` script. + +``` +$ ./run.sh +``` + +Run with aufs driver and tmp volume +**NOTE: Using a volume will prevent multiple runs from needing to +re-pull images** +``` +$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh +``` + +### Example developer flow + +These tests are useful for developing both as a registry and docker +core developer. The following setup may be used to do integration +testing between development versions + +Insert into your `.zshrc` or `.bashrc` + +``` +# /usr/lib/docker for Docker-in-Docker +# Set this directory to make each invocation run much faster, without +# the need to repull images. +export DOCKER_VOLUME=$HOME/.docker-test-volume + +# Use overlay for all Docker testing, try aufs if overlay not supported +export DOCKER_GRAPHDRIVER=overlay + +# Name this according to personal preference +function rdtest() { + if [ "$1" != "" ]; then + DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker + if [ ! -f $DOCKER_BINARY ]; then + current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` + echo "$DOCKER_BINARY does not exist" + echo "Current checked out docker version: $current_version" + echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" + return 1 + fi + fi + + $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh +} +``` + +Run with Docker release version +``` +$ rdtest +``` + +Run using local development version of docker +``` +$ cd $GOPATH/src/github.com/docker/docker +$ make binary +$ rdtest `cat VERSION` +``` + +## Running manually outside of Docker + +### Install Docker Compose + +[Docker Compose Installation Guide](http://docs.docker.com/compose/install/) + +### Start compose setup +``` +docker-compose up +``` + +### Install Certificates +The certificates must be installed in /etc/docker/cert.d in order to use TLS +client auth and use the CA certificate. +``` +sudo sh ./install_certs.sh +``` + +### Test with Docker +Tag an image as with any other private registry. Attempt to push the image. + +``` +docker pull hello-world +docker tag hello-world localhost:5440/hello-world +docker push localhost:5440/hello-world + +docker tag hello-world localhost:5441/hello-world +docker push localhost:5441/hello-world +# Perform login using user `testuser` and password `passpassword` +``` + +### Set /etc/hosts entry +Find the non-localhost ip address of local machine + +### Run bats +Run the bats tests after updating /etc/hosts, installing the certificates, and +running the `docker-compose` script. +``` +bats -p . 
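+# (replace -p with -t to emit TAP-formatted output instead of pretty output)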
+``` + +## Configurations + +Port | V2 | V1 | TLS | Authentication +--- | --- | --- | --- | --- +5000 | yes | yes | no | none +5001 | no | yes | no | none +5002 | yes | no | no | none +5011 | no | yes | yes | none +5440 | yes | yes | yes | none +5441 | yes | yes | yes | basic (testuser/passpassword) +5442 | yes | yes | yes | TLS client +5443 | yes | yes | yes | TLS client (no CA) +5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) +5445 | yes | yes | yes (no CA) | none +5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) +5447 | yes | yes | yes (no CA) | TLS client +5448 | yes | yes | yes (SSLv3) | none diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml new file mode 100644 index 00000000..d664c7bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml @@ -0,0 +1,27 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + - "5001:5001" + - "5002:5002" + - "5011:5011" + - "5440:5440" + - "5441:5441" + - "5442:5442" + - "5443:5443" + - "5444:5444" + - "5445:5445" + - "5446:5446" + - "5447:5447" + - "5448:5448" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry:0.9.1 + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh new file mode 100644 index 00000000..c1fa2b20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh @@ -0,0 +1,38 @@ +#!/bin/sh +set -e + +hostname=$1 +if [ "$hostname" = "" ]; then + hostname="localhost" +fi + +mkdir -p /etc/docker/certs.d/$hostname:5011 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5440 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5441 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5442 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5443 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt +cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert +cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5444 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5444/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5447 +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5448 +cp ./nginx/ssl/registry-ca+ca.pem 
/etc/docker/certs.d/$hostname:5448/ca.crt diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile new file mode 100644 index 00000000..04515e8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:1.9 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf +COPY registry-noauth.conf /etc/nginx/registry-noauth.conf +COPY registry-basic.conf /etc/nginx/registry-basic.conf +COPY test.passwd /etc/nginx/test.passwd +COPY ssl /etc/nginx/ssl diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf new file mode 100644 index 00000000..65c4d776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf new file mode 100644 index 00000000..5b1a2d58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf new file mode 100644 index 00000000..63cd180d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf 
new file mode 100644 index 00000000..3c629ae8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf @@ -0,0 +1,13 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + include docker-registry-v2.conf; +} +location / { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + include docker-registry.conf; +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf new file mode 100644 index 00000000..883a2d48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf @@ -0,0 +1,8 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + include docker-registry-v2.conf; +} +location / { + include docker-registry.conf; +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf new file mode 100644 index 00000000..b402eacb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf @@ -0,0 +1,277 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + +# No client auth or TLS (V1 Only) +server { + listen 5001; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry.conf; + } +} + +# No client auth or TLS (V2 Only) +server { + listen 5002; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry-v2.conf; + } +} + +# TLS localhost (V1 Only) +server { + listen 5011; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + + client_max_body_size 0; + chunked_transfer_encoding on; + location / { + include docker-registry.conf; + } +} + +# TLS localregistry (V1 Only) +server { + listen 5011; + server_name localregistry; + ssl on; + ssl_certificate 
/etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + + client_max_body_size 0; + chunked_transfer_encoding on; + location / { + include docker-registry.conf; + } +} + + + +# TLS Configuration chart +# Username/Password: testuser/passpassword +# | ca | client | basic | notes +# 5440 | yes | no | no | Tests CA certificate +# 5441 | yes | no | yes | Tests basic auth over TLS +# 5442 | yes | yes | no | Tests client auth with client CA +# 5443 | yes | yes | no | Tests client auth without client CA +# 5444 | yes | yes | yes | Tests using basic auth + tls auth +# 5445 | no | no | no | Tests insecure using TLS +# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS +# 5447 | no | yes | no | Tests client auth to insecure +# 5448 | yes | no | no | Bad SSL version + +server { + listen 5440; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + +# Add configuration for localregistry server_name +# Requires configuring /etc/hosts to use +# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing +# Docker secure/insecure registry features +server { + listen 5440; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localregistry; + ssl on; + ssl_certificate 
/etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd new file mode 100644 index 00000000..4e55de81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd @@ -0,0 +1 @@ +testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh new file mode 100644 index 00000000..81ca2ad9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +# Root directory of Distribution +DISTRIBUTION_ROOT=$(cd ../..; pwd -P) + +volumeMount="" +if [ "$DOCKER_VOLUME" != "" ]; then + volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" +fi + +dockerMount="" +if [ "$DOCKER_BINARY" != "" ]; then + dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" +fi + +# Image containing the integration tests environment. +INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} + +# Make sure we upgrade the integration environment. +docker pull $INTEGRATION_IMAGE + +# Start the integration tests in a Docker container. 
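+# --privileged is required because the image's /dind entrypoint starts a
+# nested Docker daemon; the mounts expose this source tree plus the optional
+# docker binary and volume cache configured above.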
+docker run --rm -t --privileged $volumeMount $dockerMount \ + -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ + -e "STORAGE_DRIVER=$DOCKER_GRAPHDRIVER" \ + -e "EXEC_DRIVER=$EXEC_DRIVER" \ + ${INTEGRATION_IMAGE} \ + ./test_runner.sh "$@" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh new file mode 100644 index 00000000..1917b688 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +# Run the integration tests with multiple versions of the Docker engine + +set -e +set -x + +# Don't use /tmp because this isn't available in boot2docker +tmpdir_template="`pwd`/docker-versions.XXXXX" +tmpdir=`mktemp -d "$tmpdir_template"` +trap "rm -rf $tmpdir" EXIT + +if [ "$1" == "-d" ]; then + # Start docker daemon + + # Drivers to use for Docker engines the tests are going to create. + STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} + EXEC_DRIVER=${EXEC_DRIVER:-native} + + docker --daemon --log-level=panic \ + --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & + DOCKER_PID=$! + + # Wait for it to become reachable. + tries=10 + until docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 + done +fi + +# If DOCKER_VOLUME is unset, create a temporary directory to cache containers +# between runs +# Only do this on Linux, because using /var/lib/docker from a host volume seems +# problematic with boot2docker. +if [ "$DOCKER_VOLUME" = "" -a `uname` = "Linux" ]; then + volumes_template="`pwd`/docker-versions.XXXXX" + volume=`mktemp -d "$volumes_template"` + trap "rm -rf $tmpdir $volume" EXIT +else + volume="$DOCKER_VOLUME" +fi + +# Released versions + +versions="1.6.0 1.6.1 1.7.0 1.7.1" + +for v in $versions; do + echo "Extracting Docker $v from dind image" + binpath="$tmpdir/docker-$v/docker" + ID=$(docker create dockerswarm/dind:$v) + docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" + + echo "Running tests with Docker $v" + DOCKER_BINARY="$binpath" DOCKER_VOLUME="$volume" ./run.sh + + # Cleanup. + docker rm -f "$ID" +done + +# Latest experimental version + +echo "Extracting Docker master from dind image" +binpath="$tmpdir/docker-master/docker" +docker pull dockerswarm/dind-master +ID=$(docker create dockerswarm/dind-master) +docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" + +echo "Running tests with Docker master" +DOCKER_BINARY="$binpath" DOCKER_VOLUME="$volume" ./run.sh + +# Cleanup. +docker rm -f "$ID" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh new file mode 100644 index 00000000..2c958c5e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +# Load the helpers. +#. helpers.bash + +TESTS=${@:-.} + +# Drivers to use for Docker engines the tests are going to create. 
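+# (overlay and native are only the defaults; export STORAGE_DRIVER or
+# EXEC_DRIVER before invoking this script to override them, e.g. on kernels
+# without overlay support.)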
+STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} +EXEC_DRIVER=${EXEC_DRIVER:-native} + + +function execute() { + >&2 echo "++ $@" + eval "$@" +} + +# Set IP address in /etc/hosts for localregistry +IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') +execute echo "$IP localregistry" >> /etc/hosts + +# Setup certificates +execute sh install_certs.sh localregistry + +# Start the docker engine. +execute docker --daemon --log-level=panic \ + --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & +DOCKER_PID=$! + +# Wait for it to become reachable. +tries=10 +until docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 +done + +execute time docker-compose build + +execute docker-compose up -d + +# Run the tests. +execute time bats -p $TESTS + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats new file mode 100644 index 00000000..8b7ae287 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats @@ -0,0 +1,102 @@ +# Registry host name, should be set to non-localhost address and match +# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d +hostname="localregistry" + +image="hello-world:latest" + +# Login information, should match values in nginx/test.passwd +user="testuser" +password="passpassword" +email="distribution@docker.com" + +function setup() { + docker pull $image +} + +# skip basic auth tests with Docker 1.6, where they don't pass due to +# certificate issues +function basic_auth_version_check() { + run sh -c 'docker version | fgrep -q "Client version: 1.6."' + if [ "$status" -eq 0 ]; then + skip "Basic auth tests don't support 1.6.x" + fi +} + +# has_digest enforces the last output line is "Digest: sha256:..." 
+# the input is the name of the array containing the output lines +function has_digest() { + filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') + [ "$filtered" != "" ] +} + +function login() { + run docker login -u $user -p $password -e $email $1 + [ "$status" -eq 0 ] + # First line is WARNING about credential save + [ "${lines[1]}" = "Login Succeeded" ] +} + +@test "Test valid certificates" { + docker tag -f $image $hostname:5440/$image + run docker push $hostname:5440/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth" { + basic_auth_version_check + login $hostname:5441 + docker tag -f $image $hostname:5441/$image + run docker push $hostname:5441/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client auth" { + docker tag -f $image $hostname:5442/$image + run docker push $hostname:5442/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client with invalid certificate authority fails" { + docker tag -f $image $hostname:5443/$image + run docker push $hostname:5443/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with TLS client auth" { + basic_auth_version_check + login $hostname:5444 + docker tag -f $image $hostname:5444/$image + run docker push $hostname:5444/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test unknown certificate authority fails" { + docker tag -f $image $hostname:5445/$image + run docker push $hostname:5445/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with unknown certificate authority fails" { + run login $hostname:5446 + [ "$status" -ne 0 ] + docker tag -f $image $hostname:5446/$image + run docker push $hostname:5446/$image + [ "$status" -ne 0 ] +} + +@test "Test TLS client auth to server with unknown certificate authority fails" { + docker tag -f $image $hostname:5447/$image + run docker push $hostname:5447/$image + [ "$status" -ne 0 ] +} + +@test "Test failure to connect to server fails to fallback to SSLv3" { + docker tag -f $image $hostname:5448/$image + run docker push $hostname:5448/$image + [ "$status" -ne 0 ] +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go new file mode 100644 index 00000000..68991685 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go @@ -0,0 +1,168 @@ +package digest + +import ( + "bytes" + "fmt" + "hash" + "io" + "io/ioutil" + "regexp" + "strings" + + "github.com/docker/docker/pkg/tarsum" +) + +const ( + // DigestTarSumV1EmptyTar is the digest for the empty tar file. + DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // DigestSha256EmptyTar is the canonical sha256 digest of empty data + DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// More important for this code base, this type is compatible with tarsum +// digests. 
For example, the following would be a valid Digest:
+//
+//	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
+//
+// This allows us to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat returned when digest format invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestUnsupported returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+	d := Digest(s)
+
+	return d, d.Validate()
+}
+
+// FromReader returns the most valid digest for the underlying content.
+func FromReader(rd io.Reader) (Digest, error) {
+	digester := Canonical.New()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromTarArchive produces a tarsum digest from reader rd.
+func FromTarArchive(rd io.Reader) (Digest, error) {
+	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
+	if err != nil {
+		return "", err
+	}
+
+	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
+		return "", err
+	}
+
+	d, err := ParseDigest(ts.Sum(nil))
+	if err != nil {
+		return "", err
+	}
+
+	return d, nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) (Digest, error) {
+	return FromReader(bytes.NewReader(p))
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+	s := string(d)
+	// Common case will be tarsum
+	_, err := ParseTarSum(s)
+	if err == nil {
+		return nil
+	}
+
+	// Continue on for general parser
+
+	if !DigestRegexpAnchored.MatchString(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	i := strings.Index(s, ":")
+	if i < 0 {
+		return ErrDigestInvalidFormat
+	}
+
+	// case: "sha256:" with no hex.
+	if i+1 == len(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	switch Algorithm(s[:i]) {
+	case SHA256, SHA384, SHA512:
+		break
+	default:
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string { + return string(d[d.sepIndex()+1:]) +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic("could not find ':' in digest: " + d) + } + + return i +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go new file mode 100644 index 00000000..41c8bee8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go @@ -0,0 +1,111 @@ +package digest + +import ( + "bytes" + "io" + "testing" +) + +func TestParseDigest(t *testing.T) { + for _, testcase := range []struct { + input string + err error + algorithm Algorithm + hex string + }{ + { + input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "tarsum+sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "tarsum.dev+sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", + algorithm: "tarsum.v1+sha256", + hex: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", + }, + { + input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + algorithm: "sha384", + hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + }, + { + // empty hex + input: "sha256:", + err: ErrDigestInvalidFormat, + }, + { + // just hex + input: "d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + // not hex + input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + input: "foo:d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestUnsupported, + }, + } { + digest, err := ParseDigest(testcase.input) + if err != testcase.err { + t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) + } + + if testcase.err != nil { + continue + } + + if digest.Algorithm() != testcase.algorithm { + t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) + } + + if digest.Hex() != testcase.hex { + t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) + } + + // Parse string return value and check equality + newParsed, err := ParseDigest(digest.String()) + + if err != nil { + t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) + } + + if newParsed != digest { + t.Fatalf("expected equal: %q != %q", newParsed, digest) + } + } +} + +// A few test cases used to fix behavior we expect in storage backend. + +func TestFromTarArchiveZeroLength(t *testing.T) { + checkTarsumDigest(t, "zero-length archive", bytes.NewReader([]byte{}), DigestTarSumV1EmptyTar) +} + +func TestFromTarArchiveEmptyTar(t *testing.T) { + // String of 1024 zeros is a valid, empty tar file. 
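+	// (Two 512-byte blocks of zeros form the tar end-of-archive marker, so an
+	// archive containing nothing but that marker is the canonical empty tar.)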
+	checkTarsumDigest(t, "1024 zero bytes", bytes.NewReader(bytes.Repeat([]byte("\x00"), 1024)), DigestTarSumV1EmptyTar)
+}
+
+func checkTarsumDigest(t *testing.T, msg string, rd io.Reader, expected Digest) {
+	dgst, err := FromTarArchive(rd)
+	if err != nil {
+		t.Fatalf("unexpected error digesting %s: %v", msg, err)
+	}
+
+	if dgst != expected {
+		t.Fatalf("unexpected digest for %s: %q != %q", msg, dgst, expected)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go
new file mode 100644
index 00000000..556dd93a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go
@@ -0,0 +1,95 @@
+package digest
+
+import (
+	"crypto"
+	"hash"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256         Algorithm = "sha256" // sha256 with hex encoding
+	SHA384         Algorithm = "sha384" // sha384 with hex encoding
+	SHA512         Algorithm = "sha512" // sha512 with hex encoding
+	TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling New.
+func (a Algorithm) New() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, nil is
+// returned. Make sure to check Available before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		return nil
+	}
+
+	return algorithms[a].New()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+	Hash() hash.Hash // provides direct access to underlying hash instance.
+	Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
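+// Calling Digest does not reset or consume the underlying hash state, so a
+// digester may report intermediate digests while data is still being written.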
+type digester struct {
+	alg  Algorithm
+	hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+	return d.hash
+}
+
+func (d *digester) Digest() Digest {
+	return NewDigest(d.alg, d.hash)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go
new file mode 100644
index 00000000..6ba21c80
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go
@@ -0,0 +1,21 @@
+// +build !noresumabledigest
+
+package digest
+
+import (
+	"testing"
+
+	"github.com/stevvooe/resumable"
+	_ "github.com/stevvooe/resumable/sha256"
+)
+
+// TestResumableDetection just ensures that the resumable capability of a hash
+// is exposed through the digester type, which is just a hash plus a Digest
+// method.
+func TestResumableDetection(t *testing.T) {
+	d := Canonical.New()
+
+	if _, ok := d.Hash().(resumable.Hash); !ok {
+		t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash())
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go
new file mode 100644
index 00000000..278c50e0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go
@@ -0,0 +1,52 @@
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with tarsums and
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+//	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest". A tarsum example will be more illustrative of the use case
+// involved in the registry:
+//
+//	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
+//
+// For this, we consider the algorithm to be "tarsum+sha256". Prudent
+// applications will favor the ParseDigest function to verify the format over
+// using simple type casts. However, a normal string can be cast as a digest
+// with a simple type conversion:
+//
+//	Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b")
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient
+// digest verification in the registry.
+//
+package digest
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go
new file mode 100644
index 00000000..271d35db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go
@@ -0,0 +1,195 @@
+package digest
+
+import (
+	"errors"
+	"sort"
+	"strings"
+)
+
+var (
+	// ErrDigestNotFound is used when a matching digest
+	// could not be found in a set.
+	ErrDigestNotFound = errors.New("digest not found")
+
+	// ErrDigestAmbiguous is used when multiple digests
+	// are found in a set. None of the matching digests
+	// should be considered valid matches.
+	ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
+// representation of the digest as well as a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+	entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+	return &Set{
+		entries: digestEntries{},
+	}
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+	if len(hex) == len(shortHex) {
+		if hex != shortHex {
+			return false
+		}
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
+			return false
+		}
+	} else if !strings.HasPrefix(hex, shortHex) {
+		return false
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
+		return false
+	}
+	return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (Digest, error) {
+	if len(dst.entries) == 0 {
+		return "", ErrDigestNotFound
+	}
+	var (
+		searchFunc func(int) bool
+		alg        Algorithm
+		hex        string
+	)
+	dgst, err := ParseDigest(d)
+	if err == ErrDigestInvalidFormat {
+		hex = d
+		searchFunc = func(i int) bool {
+			return dst.entries[i].val >= d
+		}
+	} else {
+		hex = dgst.Hex()
+		alg = dgst.Algorithm()
+		searchFunc = func(i int) bool {
+			if dst.entries[i].val == hex {
+				return dst.entries[i].alg >= alg
+			}
+			return dst.entries[i].val >= hex
+		}
+	}
+	idx := sort.Search(len(dst.entries), searchFunc)
+	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+		return "", ErrDigestNotFound
+	}
+	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+		return dst.entries[idx].digest, nil
+	}
+	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+		return "", ErrDigestAmbiguous
+	}
+
+	return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set.
An error will be returned +// if the given digest is invalid. If the digest already exists in the +// table, this operation will be a no-op. +func (dst *Set) Add(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[Digest]string { + m := make(map[Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg Algorithm + val string + digest Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go new file mode 100644 index 00000000..faeba6d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go @@ -0,0 +1,272 @@ +package digest + +import ( + "crypto/sha256" + "encoding/binary" + "math/rand" + "testing" +) + +func assertEqualDigests(t *testing.T, d1, d2 Digest) { + if d1 != d2 { + t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) + } +} + +func TestLookup(t *testing.T) { + digests := []Digest{ + "sha256:12345", + "sha256:1234", + "sha256:12346", + "sha256:54321", + "sha256:65431", + "sha256:64321", + "sha256:65421", + "sha256:65321", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup("54") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[3]) + + dgst, err = dset.Lookup("1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("9876") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 9876") + } + if err != ErrDigestNotFound { + 
t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:1234") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[1]) + + dgst, err = dset.Lookup("sha256:12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) + + dgst, err = dset.Lookup("sha256:12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) +} + +func TestAddDuplication(t *testing.T) { + digests := []Digest{ + "sha256:1234", + "sha256:12345", + "sha256:12346", + "sha256:54321", + "sha256:65431", + "sha512:65431", + "sha512:65421", + "sha512:65321", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + if len(dset.entries) != 8 { + t.Fatal("Invalid dset size") + } + + if err := dset.Add(Digest("sha256:12345")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 8 { + t.Fatal("Duplicate digest insert allowed") + } + + if err := dset.Add(Digest("sha384:12345")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 9 { + t.Fatal("Insert with different algorithm not allowed") + } +} + +func assertEqualShort(t *testing.T, actual, expected string) { + if actual != expected { + t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) + } +} + +func TestShortCodeTable(t *testing.T) { + digests := []Digest{ + "sha256:1234", + "sha256:12345", + "sha256:12346", + "sha256:54321", + "sha256:65431", + "sha256:64321", + "sha256:65421", + "sha256:65321", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dump := ShortCodeTable(dset, 2) + + if len(dump) < len(digests) { + t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) + } + + assertEqualShort(t, dump[digests[0]], "sha256:1234") + assertEqualShort(t, dump[digests[1]], "sha256:12345") + assertEqualShort(t, dump[digests[2]], "sha256:12346") + assertEqualShort(t, dump[digests[3]], "54") + assertEqualShort(t, dump[digests[4]], "6543") + assertEqualShort(t, dump[digests[5]], "64") + assertEqualShort(t, dump[digests[6]], "6542") + assertEqualShort(t, dump[digests[7]], "653") +} + +func createDigests(count int) ([]Digest, error) { + r := rand.New(rand.NewSource(25823)) + digests := make([]Digest, count) + for i := range digests { + h := sha256.New() + if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { + return nil, err + } + digests[i] = NewDigest("sha256", h) + } + return digests, nil +} + +func benchAddNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchLookupNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + shorts := make([]string, 0, n) + for _, short := range ShortCodeTable(dset, shortLen) { + shorts = append(shorts, short) + } + + b.ResetTimer() + for i := 
0; i < b.N; i++ {
+		if _, err = dset.Lookup(shorts[i%n]); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func benchShortCodeNTable(b *testing.B, n int, shortLen int) {
+	digests, err := createDigests(n)
+	if err != nil {
+		b.Fatal(err)
+	}
+	dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
+	for i := range digests {
+		if err := dset.Add(digests[i]); err != nil {
+			b.Fatal(err)
+		}
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		ShortCodeTable(dset, shortLen)
+	}
+}
+
+func BenchmarkAdd10(b *testing.B) {
+	benchAddNTable(b, 10)
+}
+
+func BenchmarkAdd100(b *testing.B) {
+	benchAddNTable(b, 100)
+}
+
+func BenchmarkAdd1000(b *testing.B) {
+	benchAddNTable(b, 1000)
+}
+
+func BenchmarkLookup10(b *testing.B) {
+	benchLookupNTable(b, 10, 12)
+}
+
+func BenchmarkLookup100(b *testing.B) {
+	benchLookupNTable(b, 100, 12)
+}
+
+func BenchmarkLookup1000(b *testing.B) {
+	benchLookupNTable(b, 1000, 12)
+}
+
+func BenchmarkShortCode10(b *testing.B) {
+	benchShortCodeNTable(b, 10, 12)
+}
+func BenchmarkShortCode100(b *testing.B) {
+	benchShortCodeNTable(b, 100, 12)
+}
+func BenchmarkShortCode1000(b *testing.B) {
+	benchShortCodeNTable(b, 1000, 12)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go
new file mode 100644
index 00000000..702d7dc3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go
@@ -0,0 +1,70 @@
+package digest
+
+import (
+	"fmt"
+
+	"regexp"
+)
+
+// TarsumRegexp defines a regular expression to match tarsum identifiers.
+var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")
+
+// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
+// capture groups corresponding to each component.
+var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
+
+// TarSumInfo contains information about a parsed tarsum.
+type TarSumInfo struct {
+	// Version contains the version of the tarsum.
+	Version string
+
+	// Algorithm contains the algorithm for the final digest
+	Algorithm string
+
+	// Digest contains the hex-encoded digest.
+	Digest string
+}
+
+// InvalidTarSumError provides information about a TarSum that cannot be parsed
+// by ParseTarSum.
+type InvalidTarSumError string
+
+func (e InvalidTarSumError) Error() string {
+	return fmt.Sprintf("invalid tarsum: %q", string(e))
+}
+
+// ParseTarSum parses a tarsum string into its components of interest. For
+// example, this method may receive the tarsum in the following format:
+//
+//	tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e
+//
+// The function will return the following:
+//
+//	TarSumInfo{
+//		Version: "v1",
+//		Algorithm: "sha256",
+//		Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
+//	}
+//
+func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) {
+	components := TarsumRegexpCapturing.FindStringSubmatch(tarSum)
+
+	if len(components) != 1+TarsumRegexpCapturing.NumSubexp() {
+		return TarSumInfo{}, InvalidTarSumError(tarSum)
+	}
+
+	return TarSumInfo{
+		Version:   components[3],
+		Algorithm: components[4],
+		Digest:    components[5],
+	}, nil
+}
+
+// String returns the valid string representation of the tarsum info.
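+// Tarsums parsed from the legacy, unversioned "tarsum+alg:hex" form have an
+// empty Version and round-trip back to that same form.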
+func (tsi TarSumInfo) String() string { + if tsi.Version == "" { + return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest) + } + + return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go new file mode 100644 index 00000000..894c25ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go @@ -0,0 +1,79 @@ +package digest + +import ( + "reflect" + "testing" +) + +func TestParseTarSumComponents(t *testing.T) { + for _, testcase := range []struct { + input string + expected TarSumInfo + err error + }{ + { + input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", + expected: TarSumInfo{ + Version: "v1", + Algorithm: "sha256", + Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", + }, + }, + { + input: "", + err: InvalidTarSumError(""), + }, + { + input: "purejunk", + err: InvalidTarSumError("purejunk"), + }, + { + input: "tarsum.v23+test:12341234123412341effefefe", + expected: TarSumInfo{ + Version: "v23", + Algorithm: "test", + Digest: "12341234123412341effefefe", + }, + }, + + // The following test cases are ported from docker core + { + // Version 0 tarsum + input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + expected: TarSumInfo{ + Algorithm: "sha256", + Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + }, + { + // Dev version tarsum + input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + expected: TarSumInfo{ + Version: "dev", + Algorithm: "sha256", + Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + }, + } { + tsi, err := ParseTarSum(testcase.input) + if err != nil { + if testcase.err != nil && err == testcase.err { + continue // passes + } + + t.Fatalf("unexpected error parsing tarsum: %v", err) + } + + if testcase.err != nil { + t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err) + } + + if !reflect.DeepEqual(tsi, testcase.expected) { + t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected) + } + + if testcase.input != tsi.String() { + t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go new file mode 100644 index 00000000..f8c75b53 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go @@ -0,0 +1,122 @@ +package digest + +import ( + "hash" + "io" + "io/ioutil" + + "github.com/docker/docker/pkg/tarsum" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +// NewDigestVerifier returns a verifier that compares the written bytes +// against a passed in digest. 
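+// Both plain hash digests (sha256/sha384/sha512) and tarsum digests are
+// handled; tarsums are verified by streaming the written bytes through the
+// tarsum package.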
+func NewDigestVerifier(d Digest) (Verifier, error) { + if err := d.Validate(); err != nil { + return nil, err + } + + alg := d.Algorithm() + switch alg { + case "sha256", "sha384", "sha512": + return hashVerifier{ + hash: alg.Hash(), + digest: d, + }, nil + default: + // Assume we have a tarsum. + version, err := tarsum.GetVersionFromTarsum(string(d)) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + + // TODO(stevvooe): We may actually want to ban the earlier versions of + // tarsum. That decision may not be the place of the verifier. + + ts, err := tarsum.NewTarSum(pr, true, version) + if err != nil { + return nil, err + } + + // TODO(sday): Ick! A goroutine per digest verification? We'll have to + // get the tarsum library to export an io.Writer variant. + go func() { + if _, err := io.Copy(ioutil.Discard, ts); err != nil { + pr.CloseWithError(err) + } else { + pr.Close() + } + }() + + return &tarsumVerifier{ + digest: d, + ts: ts, + pr: pr, + pw: pw, + }, nil + } +} + +// NewLengthVerifier returns a verifier that returns true when the number of +// read bytes equals the expected parameter. +func NewLengthVerifier(expected int64) Verifier { + return &lengthVerifier{ + expected: expected, + } +} + +type lengthVerifier struct { + expected int64 // expected bytes read + len int64 // bytes read +} + +func (lv *lengthVerifier) Write(p []byte) (n int, err error) { + n = len(p) + lv.len += int64(n) + return n, err +} + +func (lv *lengthVerifier) Verified() bool { + return lv.expected == lv.len +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} + +type tarsumVerifier struct { + digest Digest + ts tarsum.TarSum + pr *io.PipeReader + pw *io.PipeWriter +} + +func (tv *tarsumVerifier) Write(p []byte) (n int, err error) { + return tv.pw.Write(p) +} + +func (tv *tarsumVerifier) Verified() bool { + return tv.digest == Digest(tv.ts.Sum(nil)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go new file mode 100644 index 00000000..5ee79f34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go @@ -0,0 +1,162 @@ +package digest + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "io" + "os" + "strings" + "testing" + + "github.com/docker/distribution/testutil" +) + +func TestDigestVerifier(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + digest, err := FromBytes(p) + if err != nil { + t.Fatalf("unexpected error digesting bytes: %#v", err) + } + + verifier, err := NewDigestVerifier(digest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + io.Copy(verifier, bytes.NewReader(p)) + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } + + tf, tarSum, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating tarfile: %v", err) + } + + digest, err = FromTarArchive(tf) + if err != nil { + t.Fatalf("error digesting tarsum: %v", err) + } + + if digest.String() != tarSum { + t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum) + } + + expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size + tf.Seek(0, os.SEEK_SET) // seek back + + // This is the most relevant example for the registry application. 
It's + // effectively a read through pipeline, where the final sink is the digest + // verifier. + verifier, err = NewDigestVerifier(digest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + lengthVerifier := NewLengthVerifier(expectedSize) + rd := io.TeeReader(tf, lengthVerifier) + io.Copy(verifier, rd) + + if !lengthVerifier.Verified() { + t.Fatalf("verifier detected incorrect length") + } + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TestVerifierUnsupportedDigest ensures that unsupported digest validation is +// flowing through verifier creation. +func TestVerifierUnsupportedDigest(t *testing.T) { + unsupported := Digest("bean:0123456789abcdef") + + _, err := NewDigestVerifier(unsupported) + if err == nil { + t.Fatalf("expected error when creating verifier") + } + + if err != ErrDigestUnsupported { + t.Fatalf("incorrect error for unsupported digest: %v", err) + } +} + +// TestJunkNoDeadlock ensures that junk input into a digest verifier properly +// returns errors from the tarsum library. Specifically, we pass in a file +// with a "bad header" and should see the error from the io.Copy to verifier. +// This has been seen with gzipped tarfiles, mishandled by the tarsum package, +// but also on junk input, such as html. +func TestJunkNoDeadlock(t *testing.T) { + expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") + junk := bytes.Repeat([]byte{'a'}, 1024) + + verifier, err := NewDigestVerifier(expected) + if err != nil { + t.Fatalf("unexpected error creating verifier: %v", err) + } + + rd := bytes.NewReader(junk) + if _, err := io.Copy(verifier, rd); err == nil { + t.Fatalf("unexpected error verifying input data: %v", err) + } +} + +// TestBadTarNoDeadlock runs a tar with a "bad" tar header through digest +// verifier, ensuring that the verifier returns an error properly. +func TestBadTarNoDeadlock(t *testing.T) { + // TODO(stevvooe): This test is exposing a bug in tarsum where if we pass + // a gzipped tar file into tarsum, the library returns an error. This + // should actually work. When the tarsum package is fixed, this test will + // fail and we can remove this test or invert it. + + // This tarfile was causing deadlocks in verifiers due mishandled copy error. + // This is a gzipped tar, which we typically don't see but should handle. 
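+	// The payload is kept base64-encoded below so the binary tar bytes can
+	// live in the test source as a plain string constant.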
+ // + // From https://registry-1.docker.io/v2/library/ubuntu/blobs/tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473 + const badTar = ` +H4sIAAAJbogA/0otSdZnoDEwMDAxMDc1BdJggE6D2YZGJobGBmbGRsZAdYYGBkZGDAqmtHYYCJQW +lyQWAZ1CqTnonhsiAAAAAP//AsV/YkEJTdMAGfFvZmA2Gv/0AAAAAAD//4LFf3F+aVFyarFeTmZx +CbXtAOVnMxMTXPFvbGpmjhb/xobmwPinSyCO8PgHAAAA///EVU9v2z4MvedTEMihl9a5/26/YTkU +yNKiTTDsKMt0rE0WDYmK628/ym7+bFmH2DksQACbIB/5+J7kObwiQsXc/LdYVGibLObRccw01Qv5 +19EZ7hbbZudVgWtiDFCSh4paYII4xOVxNgeHLXrYow+GXAAqgSuEQhzlTR5ZgtlsVmB+aKe8rswe +zzsOjwtoPGoTEGplHHhMCJqxSNUPwesbEGbzOXxR34VCHndQmjfhUKhEq/FURI0FqJKFR5q9NE5Z +qbaoBGoglAB+5TSK0sOh3c3UPkRKE25dEg8dDzzIWmqN2wG3BNY4qRL1VFFAoJJb5SXHU90n34nk +SUS8S0AeGwqGyXdZel1nn7KLGhPO0kDeluvN48ty9Q2269ft8/PTy2b5GfKuh9/2LBIWo6oz+N8G +uodmWLETg0mW4lMP4XYYCL4+rlawftpIO40SA+W6Yci9wRZE1MNOjmyGdhBQRy9OHpqOdOGh/wT7 +nZdOkHZ650uIK+WrVZdkgErJfnNEJysLnI5FSAj4xuiCQNpOIoNWmhyLByVHxEpLf3dkr+k9KMsV +xV0FhiVB21hgD3V5XwSqRdOmsUYr7oNtZXTVzyTHc2/kqokBy2ihRMVRTN+78goP5Ur/aMhz+KOJ +3h2UsK43kdwDo0Q9jfD7ie2RRur7MdpIrx1Z3X4j/Q1qCswN9r/EGCvXiUy0fI4xeSknnH/92T/+ +fgIAAP//GkWjYBSMXAAIAAD//2zZtzAAEgAA` + expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") + + verifier, err := NewDigestVerifier(expected) + if err != nil { + t.Fatalf("unexpected error creating verifier: %v", err) + } + + rd := base64.NewDecoder(base64.StdEncoding, strings.NewReader(badTar)) + + if _, err := io.Copy(verifier, rd); err == nil { + t.Fatalf("unexpected error verifying input data: %v", err) + } + + if verifier.Verified() { + // For now, we expect an error, since tarsum library cannot handle + // compressed tars (!!!). + t.Fatalf("no error received after invalid tar") + } +} + +// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for +// DigestVerifier. We should be tarsum/gzip limited for common cases but we +// want to verify this. +// +// The relevant benchmarks for comparison can be run with the following +// commands: +// +// go test -bench . crypto/sha1 +// go test -bench . github.com/docker/docker/pkg/tarsum +// diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/doc.go new file mode 100644 index 00000000..bdd8cb70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile new file mode 100644 index 00000000..0ed4e526 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile @@ -0,0 +1,26 @@ +FROM docs/base:latest +MAINTAINER Mary Anthony (@moxiegirl) + +# To get the git info for this repo +COPY . /src + +COPY . 
/docs/content/registry/ + +# Sed to process GitHub Markdown +# 1-2 Remove comment code from metadata block +# 3 Change ](/word to ](/project/ in links +# 4 Change ](word.md) to ](/project/word) +# 5 Remove .md extension from link text +# 6 Change ](./ to ](/project/word) +# 7 Change ](../../ to ](/project/ +# 8 Change ](../ to ](/project/ +# +RUN find /docs/content/registry -type f -name "*.md" -exec sed -i.old \ + -e '/^/g' \ + -e '/^/g' \ + -e 's/\(\]\)\([(]\)\(\/\)/\1\2\/registry\//g' \ + -e 's/\(\][(]\)\([A-Za-z0-9]*\)\(\.md\)/\1\/registry\/\2/g' \ + -e 's/\([(]\)\(.*\)\(\.md\)/\1\2/g' \ + -e 's/\(\][(]\)\(\.\/\)/\1\/registry\//g' \ + -e 's/\(\][(]\)\(\.\.\/\.\.\/\)/\1\/registry\//g' \ + -e 's/\(\][(]\)\(\.\.\/\)/\1\/registry\//g' {} \; \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile new file mode 100644 index 00000000..021e8f6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile @@ -0,0 +1,55 @@ +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate + +# env vars passed through directly to Docker's build scripts +# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily +# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILDFLAGS \ + -e DOCKER_CLIENTONLY \ + -e DOCKER_EXECDRIVER \ + -e DOCKER_GRAPHDRIVER \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) + +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 + +# Get the IP ADDRESS +DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") +HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") +HUGO_BIND_IP=0.0.0.0 + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) + + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) + +default: docs + +docs: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + +docs-draft: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + + +docs-shell: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash + + +docs-build: +# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files +# echo "$(GIT_BRANCH)" > GIT_BRANCH +# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET +# echo "$(GITCOMMIT)" > GITCOMMIT + docker build -t "$(DOCKER_DOCS_IMAGE)" . 
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md
new file mode 100644
index 00000000..558a1199
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md
@@ -0,0 +1,54 @@
+
+
+# Architecture
+
+## Design
+**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios.
+
+### Eventual Consistency
+
+> **NOTE:** This section belongs somewhere, perhaps in a design document. We
+> are leaving this here so the information is not lost.
+
+Running the registry on eventually consistent backends has been part of the
+design from the beginning. This section covers some of the approaches to
+dealing with this reality.
+
+There are a few classes of issues that we need to worry about when
+implementing something on top of the storage drivers:
+
+1. Read-After-Write consistency (see this [article on
+   s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)).
+2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict).
+
+In reality, the registry must worry about these kinds of errors when doing the
+following:
+
+1. Accepting data into a temporary upload file that may not have the latest
+   data block yet (read-after-write).
+2. Moving uploaded data into its blob location (write-write race).
+3. Modifying the "current" manifest for a given tag (write-write race).
+4. A whole slew of operations around deletes (read-after-write, delete-write
+   races, garbage collection, etc.).
+
+The backend path layout employs a few techniques to avoid these problems:
+
+1. Large writes are done to private upload directories. This alleviates most
+   of the corruption potential under multiple writers by avoiding multiple
+   writers.
+2. Constraints in storage driver implementations, such as support for writing
+   after the end of a file to extend it.
+3. Digest verification to avoid data corruption.
+4. Manifest files are stored by digest and cannot change.
+5. All other non-content files (links, hashes, etc.) are written as an atomic
+   unit. Anything that requires additions and deletions is broken out into
+   separate "files". Last writer still wins.
+
+Unfortunately, one must play this game when trying to build something like
+this on top of eventually consistent storage systems. If we run into serious
+problems, we can wrap the storage drivers in a shared consistency layer but
+that would increase complexity and hinder registry cluster performance.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/authentication.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/authentication.md
new file mode 100644
index 00000000..507c9a66
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/authentication.md
@@ -0,0 +1,185 @@
+
+
+# Authentication
+
+While running an unrestricted registry is certainly OK for development, secured local networks, or test setups, you should probably implement access restriction if you plan on making your registry available to a wider audience or through the public internet.
+
+The Registry supports two different authentication methods to get you there:
+
+ * direct authentication, through the use of a proxy
+ * delegated authentication, redirecting to a trusted token server
+
+The first method is recommended for most people as the most straightforward solution.
+ +The second method requires significantly more investment, and only make sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. + +## Direct authentication through a proxy + +With this method, you implement basic authentication in a reverse proxy that sits in front of your registry. + +Since the Docker engine uses basic authentication to negotiate access to the Registry, securing communication between docker engines and your proxy is absolutely paramount. + +While this model gives you the ability to use whatever authentication backend you want through a secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. + +Below is a simple example of secured basic authentication (using TLS), using nginx as a proxy. + +### Requirements + +You should have followed entirely the basic [deployment guide](deploying.md). + +If you have not, please take the time to do so. + +At this point, it's assumed that: + + * you understand Docker security requirements, and how to configure your docker engines properly + * you have installed Docker Compose + * you have a `domain.crt` and `domain.key` files, for the CN `myregistrydomain.com` (or whatever domain name you want to use) + * these files are located inside the current directory, and there is nothing else in that directory + * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates + * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm registry`) + +### Setting things up + +Read again the requirements. + +Ready? + +Run the following: + +``` +mkdir auth +mkdir data + +# This is the main nginx configuration you will use +cat < auth/registry.conf +upstream docker-registry { + server registry:5000; +} + +server { + listen 443 ssl; + server_name myregistrydomain.com; + + # SSL + ssl_certificate /etc/nginx/conf.d/domain.crt; + ssl_certificate_key /etc/nginx/conf.d/domain.key; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting plus add_header + auth_basic "registry.localhost"; + auth_basic_user_file /etc/nginx/conf.d/registry.password; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + + proxy_pass http://docker-registry; + proxy_set_header Host \$http_host; # required for docker client's sake + proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto \$scheme; + proxy_read_timeout 900; + } +} +EOF + +# Now, create a password file for "testuser" and "testpassword" +echo 'testuser:$2y$05$.nIfPAEgpWCh.rpts/XHX.UOfCRNtvMmYjh6sY/AZBmeg/dQyN62q' > auth/registry.password + +# Alternatively you could have achieved the same thing with htpasswd +# htpasswd -Bbc auth/registry.password testuser testpassword + +# Copy over your certificate files +cp domain.crt auth 
+cp domain.key auth
+
+# Now create your compose file
+
+cat <<EOF > docker-compose.yml
+nginx:
+  image: "nginx:1.9"
+  ports:
+    - 5043:443
+  links:
+    - registry:registry
+  volumes:
+    - `pwd`/auth/:/etc/nginx/conf.d
+
+registry:
+  image: registry:2
+  ports:
+    - 127.0.0.1:5000:5000
+  environment:
+    REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /data
+  volumes:
+    - `pwd`/data:/data
+EOF
+```
+
+### Starting and stopping
+
+That's it. You can now:
+
+ * `docker-compose up -d` to start your registry
+ * `docker login myregistrydomain.com:5043` (using `testuser` and `testpassword`)
+ * `docker tag ubuntu myregistrydomain.com:5043/toto`
+ * `docker push myregistrydomain.com:5043/toto`
+
+### Docker still complains about the certificate?
+
+That's certainly because you are using a self-signed certificate, despite the warnings.
+
+If you really insist on using one, you have to trust it at the OS level.
+
+Usually, on Ubuntu this is done with:
+```
+cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt
+update-ca-certificates
+```
+
+... and on RedHat with:
+```
+cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt
+update-ca-trust
+```
+
+Now:
+
+ * `service docker stop && service docker start` (or any other way you use to restart docker)
+ * `docker-compose up -d` to bring your registry up
+
+## Token-based delegated authentication
+
+This is **advanced**.
+
+You will find [background information here](./spec/auth/token.md) and [configuration information here](configuration.md#auth).
+
+Beware that you will have to implement your own authentication service for this to work (though there exist third-party open-source implementations).
+
+# Manual Set-up
+
+If you'd like to manually configure your HTTP server, here are a few requirements that are absolutely necessary for the docker client to be able to interface with it (the nginx configuration above demonstrates all three):
+
+- Each response needs to have the "Docker-Distribution-Api-Version" header set to "registry/2.0", even (especially) if there is a 401 or 404 error response. Verify with cURL that this header is provided. Note: if you're using Nginx, this functionality is only available since 1.7.5 using the "always" add_header directive, or when compiling with the "more_set_headers" module.
+
+- A large enough maximum for the client body size, preferably unlimited. Because images can be pretty big, the very low default maximum size of most HTTP servers won't be sufficient for uploading the files.
+
+- Support for chunked transfer encoding.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md
new file mode 100644
index 00000000..b54322c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md
@@ -0,0 +1,157 @@
+
+
+# Build the development environment
+
+The first prerequisite of properly building distribution targets is to have a Go
+development environment set up. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
+for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
+environment.
+
+If a Go development environment is set up, one can use `go get` to install the
+`registry` command from the current latest:
+
+```sh
+go get github.com/docker/distribution/cmd/registry
+```
+
+The above will install the source repository into the `GOPATH`.
+ +Now create the directory for the registry data (this might require you to set permissions properly) + +```sh +mkdir -p /var/lib/registry +``` + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + +``` +$ $GOPATH/bin/registry -version +$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown +``` + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + +``` +$ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-dev.yml +INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown +INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown +INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown +INFO[0000] debug server listening localhost:5001 +``` + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + +``` +go get github.com/tools/godep github.com/golang/lint/golint +``` + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + +``` +$ GOPATH=`godep path`:$GOPATH make ++ clean ++ fmt ++ vet ++ lint ++ build +github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar +github.com/Sirupsen/logrus +github.com/docker/libtrust +... +github.com/yvasiyarov/gorelic +github.com/docker/distribution/registry/handlers +github.com/docker/distribution/cmd/registry ++ test +... +ok github.com/docker/distribution/digest 7.875s +ok github.com/docker/distribution/manifest 0.028s +ok github.com/docker/distribution/notifications 17.322s +? github.com/docker/distribution/registry [no test files] +ok github.com/docker/distribution/registry/api/v2 0.101s +? github.com/docker/distribution/registry/auth [no test files] +ok github.com/docker/distribution/registry/auth/silly 0.011s +... ++ /Users/sday/go/src/github.com/docker/distribution/bin/registry ++ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template ++ /Users/sday/go/src/github.com/docker/distribution/bin/dist ++ binaries +``` + +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. 
We can verify this worked by running +the registry binary generated in the "./bin" directory: + +```sh +$ ./bin/registry -version +./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m +``` + +### Developing + +The above approaches are helpful for small experimentation. If more complex +tasks are at hand, it is recommended to employ the full power of `godep`. + +The Makefile is designed to have its `GOPATH` defined externally. This allows +one to experiment with various development environment setups. This is +primarily useful when testing upstream bugfixes, by modifying local code. This +can be demonstrated using `godep` to migrate the `GOPATH` to use the specified +dependencies. The `GOPATH` can be migrated to the current package versions +declared in `Godeps` with the following command: + +```sh +godep restore +``` + +> **WARNING:** This command will checkout versions of the code specified in +> Godeps/Godeps.json, modifying the contents of `GOPATH`. If this is +> undesired, it is recommended to create a workspace devoted to work on the +> _Distribution_ project. + +With a successful run of the above command, one can now use `make` without +specifying the `GOPATH`: + +```sh +$ make +``` + +If that is successful, standard `go` commands, such as `go test` should work, +per package, without issue. + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. + +To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) +(librados-dev and librbd-dev will be required to build the bindings): + +```sh +export DOCKER_BUILDTAGS='include_rados' +``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md new file mode 100644 index 00000000..f2f58a4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md @@ -0,0 +1,1630 @@ + + + + +# Registry Configuration Reference + +The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. + +## Override configuration options + +In a typical setup where you run your Registry from the official image, you can specify any configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. + +To override a configuration option, create an environment variable named +`REGISTRY_variable` where *`variable`* is the name of the configuration option +and the `_` (underscore) represents indention levels. For example, you can +configure the `rootdirectory` of the `filesystem` storage backend: + + storage: + filesystem: + rootdirectory: /var/lib/registry + +To override this value, set an environment variable like this: + + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere + +This variable overrides the `/var/lib/registry` value to the `/somewhere` +directory. + +>**Note**: If an environment variable changes a map value into a string, such +>as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then +>all sub-fields will be erased. As such, specifying the storage type in the +>environment will remove all parameters related to the old storage +>configuration. 
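+
+For instance, when running the official image, the same override can be passed
+straight to `docker run`. This is only a sketch of the mechanism described
+above, reusing the `rootdirectory` example:
+
+    # override storage.filesystem.rootdirectory for this container only
+    docker run -d -p 5000:5000 \
+        -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere \
+        --restart=always --name registry registry:2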
+ + + +## List of configuration options + +This section lists all the registry configuration options. Some options in +the list are mutually exclusive. So, make sure to read the detailed reference +information about each option that appears later in this page. + + version: 0.1 + log: + level: debug + formatter: text + fields: + service: registry + environment: staging + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com + loglevel: debug # deprecated: use "log" + storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + bucket: bucketname + encrypt: true + secure: true + v4auth: true + chunksize: 5242880 + rootdirectory: /s3/object/name/prefix + rados: + poolname: radospool + username: radosuser + chunksize: 4194304 + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + redirect: + disable: false + cache: + blobdescriptor: redis + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000 + reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true + http: + addr: localhost:5000 + prefix: /my/nested/registry/ + secret: asecretforlocaldevelopment + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + debug: + addr: localhost:5001 + notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + +In some instances a configuration option is **optional** but it contains child +options marked as **required**. This indicates that you can omit the parent with +all its children. However, if the parent is included, you must also include all +the children marked **required**. + + + +## version + + version: 0.1 + +The `version` option is **required**. It specifies the configuration's version. 
+It is expected to remain a top-level field, to allow for a consistent version
+check before parsing the remainder of the configuration file.
+
+## log
+
+The `log` subsection configures the behavior of the logging system. The logging
+system outputs everything to stdout. You can adjust the granularity and format
+with this configuration section.
+
+    log:
+      level: debug
+      formatter: text
+      fields:
+        service: registry
+        environment: staging
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
+| `formatter` | no | This selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` or `logstash`. The default is `text`. |
+| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they have been mixed into other systems. |
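+
+Following the override scheme described above, the same options can be set
+through the environment; a sketch (the variable names below are derived from
+that naming scheme, not listed separately in this reference):
+
+    export REGISTRY_LOG_LEVEL=debug
+    export REGISTRY_LOG_FORMATTER=json
+    export REGISTRY_LOG_FIELDS_SERVICE=registry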
+
+## hooks
+
+    hooks:
+      - type: mail
+        levels:
+          - panic
+        options:
+          smtp:
+            addr: smtp.sendhost.com:25
+            username: sendername
+            password: password
+            insecure: true
+          from: name@sendhost.com
+          to:
+            - name@receivehost.com
+
+The `hooks` subsection configures the logging hooks' behavior. This subsection
+includes a sequence handler which you can use for sending mail, for example.
+Refer to `loglevel` to configure the level of messages printed.
+
+## loglevel
+
+> **DEPRECATED:** Please use [log](#log) instead.
+
+    loglevel: debug
+
+Permitted values are `error`, `warn`, `info` and `debug`. The default is
+`info`.
+
+## storage
+
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+      azure:
+        accountname: accountname
+        accountkey: base64encodedaccountkey
+        container: containername
+      s3:
+        accesskey: awsaccesskey
+        secretkey: awssecretkey
+        region: us-west-1
+        bucket: bucketname
+        encrypt: true
+        secure: true
+        v4auth: true
+        chunksize: 5242880
+        rootdirectory: /s3/object/name/prefix
+      rados:
+        poolname: radospool
+        username: radosuser
+        chunksize: 4194304
+      swift:
+        username: username
+        password: password
+        authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
+        tenant: tenantname
+        tenantid: tenantid
+        domain: domain name for Openstack Identity v3 API
+        domainid: domain id for Openstack Identity v3 API
+        insecureskipverify: true
+        region: fr
+        container: containername
+        rootdirectory: /swift/object/name/prefix
+      cache:
+        blobdescriptor: inmemory
+      maintenance:
+        uploadpurging:
+          enabled: true
+          age: 168h
+          interval: 24h
+          dryrun: false
+      redirect:
+        disable: false
+
+The storage option is **required** and defines which storage backend is in use.
+You must configure one backend; if you configure more, the registry returns an error.
+
+If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use an S3 or Azure backing data store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits. Failure to do so can result in the following error message:
+
+    mkdir /XXX protocol error and your registry will not function properly.
+
+### cache
+
+Use the `cache` subsection to enable caching of data accessed in the storage
+backend. Currently, the only available cache provides fast access to layer
+metadata. If configured, it uses the `blobdescriptor` field.
+
+You can set the `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses
+a Redis pool to cache layer metadata. The `inmemory` value uses an in-memory
+map.
+
+>**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these
+>are equivalent, `layerinfo` has been deprecated, in favor of
+>`blobdescriptor`.
+
+### redirect
+
+The `redirect` subsection provides configuration for managing redirects from
+content backends. For backends that support it, redirecting is enabled by
+default. Certain deployment scenarios may prefer to route all data through the
+Registry, rather than redirecting to the backend. This may be more efficient
+when using a backend that is not colocated or when a registry instance is
+doing aggressive caching.
+
+Redirects can be disabled by adding a single flag `disable`, set to `true`
+under the `redirect` section:
+
+    redirect:
+      disable: true
+
+### filesystem
+
+The `filesystem` storage backend uses the local disk to store registry files.
+It is ideal for development and may be appropriate for some small-scale production
+applications.
+
+This backend has a single, required `rootdirectory` parameter. The parameter
+specifies the absolute path to a directory. The registry stores all its data
+here so make sure there is adequate space available.
+
+### azure
+
+This storage backend uses Microsoft's Azure Blob Storage.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `accountname` | yes | Azure account name. |
+| `accountkey` | yes | Azure account key. |
+| `container` | yes | Name of the Azure container into which to store data. |
+| `realm` | no | Domain name suffix for the Storage Service API endpoint. By default, this is `core.windows.net`. |
+
+
+### rados
+
+This storage backend uses [Ceph Object Storage](http://ceph.com/docs/master/rados/).
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `poolname` | yes | Ceph pool name. |
+| `username` | no | Ceph cluster user to connect as (i.e. admin, not client.admin). |
+| `chunksize` | no | Size of the written RADOS objects. Default value is 4MB (4194304). |
+
+
+### S3
+
+This storage backend uses Amazon's Simple Storage Service (S3).
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `accesskey` | yes | Your AWS Access Key. |
+| `secretkey` | yes | Your AWS Secret Key. |
+| `region` | yes | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS based bucket routing. |
+| `bucket` | yes | The bucket name in which you want to store the registry's data. |
+| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
+| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `false`. |
+| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
+| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number that is larger than 5*1024*1024. |
+| `rootdirectory` | no | A prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. |
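+
+As with every other option in this file, the S3 parameters map onto
+environment variables through the `REGISTRY_` scheme described at the top of
+this page; the same pattern applies to the `azure`, `rados` and `swift`
+drivers. A sketch with placeholder credentials, assuming your configuration
+file already selects the `s3` backend:
+
+    export REGISTRY_STORAGE_S3_ACCESSKEY=awsaccesskey
+    export REGISTRY_STORAGE_S3_SECRETKEY=awssecretkey
+    export REGISTRY_STORAGE_S3_REGION=us-west-1
+    export REGISTRY_STORAGE_S3_BUCKET=bucketname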
+
+### Maintenance
+
+Currently the registry can perform one maintenance function: upload purging.
+This and future maintenance functions which are related to storage can be
+configured under the maintenance section.
+
+### Upload Purging
+
+Upload purging is a background process that periodically removes orphaned
+files from the upload directories of the registry. Upload purging is enabled
+by default. To configure upload directory purging, the following parameters
+must be set.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `enabled` | yes | Set to `true` to enable upload purging. Default=true. |
+| `age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week). |
+| `interval` | yes | The interval between upload directory purging. Default=24h. |
+| `dryrun` | yes | Set to `true` to obtain a summary of what directories will be deleted. Default=false. |
+
+Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week).
+
+### Openstack Swift
+
+This storage backend uses Openstack Swift object storage.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `authurl` | yes | URL for obtaining an auth token. `https://storage.myprovider.com/v2.0` or `https://storage.myprovider.com/v3/auth`. |
+| `username` | yes | Your Openstack user name. |
+| `password` | yes | Your Openstack password. |
+| `region` | no | The Openstack region in which your container exists. |
+| `container` | yes | The container name in which you want to store the registry's data. |
+| `tenant` | no | Your Openstack tenant name. |
+| `tenantid` | no | Your Openstack tenant id. |
+| `domain` | no | Your Openstack domain name for Identity v3 API. |
+| `domainid` | no | Your Openstack domain id for Identity v3 API. |
+| `insecureskipverify` | no | Set to `true` to skip TLS verification; `false` by default. |
+| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
+| `rootdirectory` | no | A prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. |
+
+
+## auth
+
+    auth:
+      silly:
+        realm: silly-realm
+        service: silly-service
+      token:
+        realm: token-realm
+        service: token-service
+        issuer: registry-token-issuer
+        rootcertbundle: /root/certs/bundle
+      htpasswd:
+        realm: basic-realm
+        path: /path/to/htpasswd
+
+The `auth` option is **optional**. There are currently 3 possible auth
+providers: `silly`, `token` and `htpasswd`. You can configure only one `auth`
+provider.
+
+### silly
+
+The `silly` auth is only for development purposes. It simply checks for the
+existence of the `Authorization` header in the HTTP request. It has no regard for
+the header's value. If the header does not exist, the `silly` auth responds with a
+challenge response, echoing back the realm, service, and scope that access was
+denied for.
+
+The following values are used to configure the response:
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `realm` | yes | The realm in which the registry server authenticates. |
+| `service` | yes | The service being authenticated. |
+
+
+
+### token
+
+Token-based authentication allows the authentication system to be decoupled from
+the registry. It is a well established authentication paradigm with a high
+degree of security.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `realm` | yes | The realm in which the registry server authenticates. |
+| `service` | yes | The service being authenticated. |
+| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. |
+| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates that is used to sign authentication tokens. |
+
+For more information about token-based authentication configuration, see the [specification](spec/auth/token.md).
+
+### htpasswd
+
+The _htpasswd_ authentication backend allows one to configure basic auth using an
+[Apache HTPasswd File](https://httpd.apache.org/docs/2.4/programs/htpasswd.html).
+Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are
+supported. Entries with other hash types will be ignored. The htpasswd file is
+loaded once, at startup. If the file is invalid, the registry will display an
+error and will not start.
+
+> __WARNING:__ This authentication scheme should only be used with TLS
+> configured, since basic authentication sends passwords as part of the http
+> header.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `realm` | yes | The realm in which the registry server authenticates. |
+| `path` | yes | Path to htpasswd file to load at startup. |
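+
+A minimal way to try this out, reusing the `htpasswd` invocation shown in the
+authentication guide (the file path and realm below are arbitrary examples,
+and the variables follow the environment override scheme):
+
+    # create a bcrypt-hashed entry for testuser/testpassword
+    htpasswd -Bbc /path/to/htpasswd testuser testpassword
+
+    # point the registry at it through the environment
+    export REGISTRY_AUTH_HTPASSWD_REALM=basic-realm
+    export REGISTRY_AUTH_HTPASSWD_PATH=/path/to/htpasswd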
+
+## middleware
+
+The `middleware` option is **optional**. Use this option to inject middleware at
+named hook points. All middlewares must implement the same interface as the
+object they're wrapping. This means a registry middleware must implement the
+`distribution.Namespace` interface, repository middleware must implement
+`distribution.Repository`, and storage middleware must implement
+`driver.StorageDriver`.
+
+Currently only one middleware, `cloudfront`, a storage middleware, is supported
+in the registry implementation.
+
+    middleware:
+      registry:
+        - name: ARegistryMiddleware
+          options:
+            foo: bar
+      repository:
+        - name: ARepositoryMiddleware
+          options:
+            foo: bar
+      storage:
+        - name: cloudfront
+          options:
+            baseurl: https://my.cloudfronted.domain.com/
+            privatekey: /path/to/pem
+            keypairid: cloudfrontkeypairid
+            duration: 3000
+
+Each middleware entry has `name` and `options` entries. The `name` must
+correspond to the name under which the middleware registers itself. The
+`options` field is a map that details custom configuration required to
+initialize the middleware. It is treated as a `map[string]interface{}`. As such,
+it supports any interesting structures desired, leaving it up to the middleware
+initialization function to best determine how to handle the specific
+interpretation of the options.
+
+### cloudfront
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `baseurl` | yes | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
+| `privatekey` | yes | Private Key for Cloudfront provided by AWS. |
+| `keypairid` | yes | Key pair ID provided by AWS. |
+| `duration` | no | Duration for which a signed URL should be valid. |
+
+
+## reporting
+
+    reporting:
+      bugsnag:
+        apikey: bugsnagapikey
+        releasestage: bugsnagreleasestage
+        endpoint: bugsnagendpoint
+      newrelic:
+        licensekey: newreliclicensekey
+        name: newrelicname
+        verbose: true
+
+The `reporting` option is **optional** and configures error and metrics
+reporting tools. At the moment only two services are supported, [New
+Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com); a valid
+configuration may contain both.
+
+### bugsnag
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `apikey` | yes | API Key provided by Bugsnag. |
+| `releasestage` | no | Tracks where the registry is deployed, for example, `production`, `staging`, or `development`. |
+| `endpoint` | no | Specify the enterprise Bugsnag endpoint. |
+
+
+### newrelic
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `licensekey` | yes | License key provided by New Relic. |
+| `name` | no | New Relic application name. |
+| `verbose` | no | Enable New Relic debugging output on stdout. |
+
+## http
+
+    http:
+      addr: localhost:5000
+      net: tcp
+      prefix: /my/nested/registry/
+      secret: asecretforlocaldevelopment
+      tls:
+        certificate: /path/to/x509/public
+        key: /path/to/x509/private
+        clientcas:
+          - /path/to/ca.pem
+          - /path/to/another/ca.pem
+      debug:
+        addr: localhost:5001
+
+The `http` option details the configuration for the HTTP server that hosts the registry.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `addr` | yes | The address on which the server should accept connections. The form depends on the network type (see the `net` option): `HOST:PORT` for `tcp` and `FILE` for a unix socket. |
+| `net` | no | The network used to create a listening socket. Known networks are `unix` and `tcp`. The default empty value means `tcp`. |
+| `prefix` | no | If the server does not run at the root path, use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
+| `secret` | yes | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. This parameter may be omitted, in which case the registry will automatically generate a secret at launch. **WARNING:** If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries. |

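+
+A sketch of the equivalent environment overrides (the secret is a placeholder;
+generate your own, and keep it identical across load-balanced registries):
+
+    export REGISTRY_HTTP_ADDR=localhost:5000
+    export REGISTRY_HTTP_SECRET=asecretforlocaldevelopment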
+
+
+### tls
+
+The `tls` struct within `http` is **optional**. Use this to configure TLS
+for the server. If you already have a server such as Nginx or Apache running on
+the same host as the registry, you may prefer to configure TLS termination there
+and proxy connections to the registry server.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `certificate` | yes | Absolute path to the x509 certificate file. |
+| `key` | yes | Absolute path to the x509 private key file. |
+| `clientcas` | no | An array of absolute paths to x509 CA files. |
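+
+For reference, the deployment guide enables exactly these parameters on the
+official image through the environment:
+
+    docker run -d -p 5000:5000 \
+        -v `pwd`/certs:/certs \
+        -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
+        -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
+        --restart=always --name registry registry:2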
+
+
+### debug
+
+The `debug` option is **optional**. Use it to configure a debug server that
+can be helpful in diagnosing problems. The debug endpoint can be used for
+monitoring registry metrics and health, as well as profiling. Sensitive
+information may be available via the debug endpoint. Please be certain that
+access to the debug endpoint is locked down in a production environment.
+
+The `debug` section takes a single, required `addr` parameter. This parameter
+specifies the `HOST:PORT` on which the debug server should accept connections.
+
+
+## notifications
+
+    notifications:
+      endpoints:
+        - name: alistener
+          disabled: false
+          url: https://my.listener.com/event
+          headers:
+          timeout: 500
+          threshold: 5
+          backoff: 1000
+
+The `notifications` option is **optional** and currently may contain a single
+option, `endpoints`.
+
+### endpoints
+
+`endpoints` is a list of named services (URLs) that can accept event notifications.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `name` | yes | A human readable name for the service. |
+| `disabled` | no | A boolean to enable/disable notifications for a service. |
+| `url` | yes | The URL to which events should be published. |
+| `headers` | yes | Static headers to add to each request. |
+| `timeout` | yes | An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
+| `threshold` | yes | An integer specifying how long to wait before backing off a failure. |
+| `backoff` | yes | How long the system backs off before retrying. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
+
+
+## redis
+
+    redis:
+      addr: localhost:6379
+      password: asecret
+      db: 0
+      dialtimeout: 10ms
+      readtimeout: 10ms
+      writetimeout: 10ms
+      pool:
+        maxidle: 16
+        maxactive: 64
+        idletimeout: 300s
+
+Declare parameters for constructing the redis connections. Registry instances
+may use the Redis instance for several applications. The current purpose is
+caching information about immutable blobs. Most of the options below control
+how the registry connects to redis. You can control the pool's behavior
+with the [pool](#pool) subsection.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `addr` | yes | Address (host and port) of redis instance. |
+| `password` | no | A password used to authenticate to the redis instance. |
+| `db` | no | Selects the db for each connection. |
+| `dialtimeout` | no | Timeout for connecting to a redis instance. |
+| `readtimeout` | no | Timeout for reading from redis connections. |
+| `writetimeout` | no | Timeout for writing to redis connections. |
+
+
+### pool
+
+    pool:
+      maxidle: 16
+      maxactive: 64
+      idletimeout: 300s
+
+Configure the behavior of the Redis connection pool.
+
+| Parameter | Required | Description |
+| --------- | -------- | ----------- |
+| `maxidle` | no | Sets the maximum number of idle connections. |
+| `maxactive` | no | Sets the maximum number of connections that should be opened before blocking a connection request. |
+| `idletimeout` | no | Sets the amount of time to wait before closing inactive connections. |
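+
+A sketch of the corresponding environment overrides, covering the connection,
+the pool subsection and the cache that uses it (values mirror the sample
+above; variable names are derived from the `REGISTRY_` naming scheme):
+
+    export REGISTRY_REDIS_ADDR=localhost:6379
+    export REGISTRY_REDIS_PASSWORD=asecret
+    export REGISTRY_REDIS_POOL_MAXIDLE=16
+    export REGISTRY_REDIS_POOL_MAXACTIVE=64
+    export REGISTRY_REDIS_POOL_IDLETIMEOUT=300s
+    export REGISTRY_STORAGE_CACHE_BLOBDESCRIPTOR=redis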
+
+
+## Example: Development configuration
+
+The following is a simple example you can use for local development:
+
+    version: 0.1
+    log:
+      level: debug
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+    http:
+      addr: localhost:5000
+      secret: asecretforlocaldevelopment
+      debug:
+        addr: localhost:5001
+
+The above configures the registry instance to run on port `5000`, binding to
+`localhost`, with the `debug` server enabled. Registry data storage is in the
+`/var/lib/registry` directory. Logging is in `debug` mode, which is the most
+verbose.
+
+A similar simple configuration is available at
+[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml).
+Both are generally useful for local development.
+
+
+## Example: Middleware configuration
+
+This example illustrates how to configure storage middleware in a registry.
+Middleware allows the registry to serve layers via a content delivery network
+(CDN). This is useful for reducing requests to the storage layer.
+
+Currently, the registry supports [Amazon
+Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in
+conjunction with the S3 storage driver.
+
+| Parameter | Description |
+| --------- | ----------- |
+| `name` | The storage middleware name. Currently `cloudfront` is an accepted value. |
+| `disabled` | Set to `false` to easily disable the middleware. |
+| `options` | A set of key/value options to configure the middleware: `baseurl` (the Cloudfront base URL), `privatekey` (the location of your AWS private key on the filesystem), `keypairid` (the ID of your Cloudfront keypair), and `duration` (the duration in minutes for which the URL is valid; default is 20). |
+
+The following example illustrates these values:
+
+    middleware:
+      storage:
+        - name: cloudfront
+          disabled: false
+          options:
+            baseurl: http://d111111abcdef8.cloudfront.net
+            privatekey: /path/to/asecret.pem
+            keypairid: asecret
+            duration: 60
+
+
+>**Note**: Cloudfront keys exist separately from other AWS keys. See
+>[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
+>for more information.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md
new file mode 100644
index 00000000..ef44641a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md
@@ -0,0 +1,177 @@
+
+
+# Deploying a registry server
+
+You obviously need to [install Docker](https://docs.docker.com/installation/) (remember you need **Docker version 1.6.0 or newer**).
+
+## Getting started
+
+Start your registry:
+
+    $ docker run -d -p 5000:5000 \
+        --restart=always --name registry registry:2
+
+That's it.
+
+You can now tag an image and push it:
+
+    $ docker pull ubuntu && docker tag ubuntu localhost:5000/batman/ubuntu
+    $ docker push localhost:5000/batman/ubuntu
+
+Then pull it back:
+
+    $ docker pull localhost:5000/batman/ubuntu
+
+## Where is my data?
+
+By default, your registry stores its data on the local filesystem, inside the container.
+
+In a production environment, it's highly recommended to use [another storage backend](storagedrivers.md), by [configuring it](configuration.md).
+
+If you want to stick with the local posix filesystem, you should store your data outside of the container.
+
+This is achieved by mounting a volume into the container:
+
+    $ docker run -d -p 5000:5000 \
+        -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/var/lib/registry \
+        -v /myregistrydata:/var/lib/registry \
+        --restart=always --name registry registry:2
+
+## Making your Registry available
+
+Now that your registry works on `localhost`, you probably want to make it available to other hosts as well.
+
+Let's assume your registry is accessible via the domain name `myregistrydomain.com` (still on port `5000`).
+
+If you try to `docker pull myregistrydomain.com:5000/batman/ubuntu`, you will see the following error message:
+
+```
+FATA[0000] Error response from daemon: v1 ping attempt failed with error:
+Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527.
+If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add
+`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments.
+In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag;
+simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt
+```
+
+When trying to reach a non-`localhost` registry, Docker requires that you either secure it using https, or make it explicit that you want to run an insecure registry.
+
+You basically have three different options to comply with that security requirement here.
+
+### 1. buy an SSL certificate for your domain
+
+This is the (highly) recommended solution.
+
+You can buy a certificate for as little as $10 a year (some registrars even offer certificates for free), and this will save you a lot of trouble.
+
+Assuming you now have a `domain.crt` and `domain.key` inside a directory named `certs`:
+
+```
+# Stop your registry
+docker stop registry && docker rm registry
+
+# Start your registry with TLS enabled
+docker run -d -p 5000:5000 \
+  -v `pwd`/certs:/certs \
+  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
+  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
+  --restart=always --name registry \
+  registry:2
+```
+
+A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command:
+
+```
+$ cat server.crt intermediate-certificates.pem > server.with-intermediate.crt
+```
+
+You then configure the registry to use your certificate bundle by providing the `REGISTRY_HTTP_TLS_CERTIFICATE` environment variable.
+
+**Pros:**
+
+ - best solution
+ - works without further ado (assuming you bought your certificate from a CA that is trusted by your operating system)
+
+**Cons:**
+
+ - ?
+
+### 2. instruct docker to trust your registry as insecure
+
+This basically tells Docker to entirely disregard security for your registry.
+
+1. edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to existing `DOCKER_OPTS`)
+2. restart your Docker daemon: on Ubuntu, this is usually `service docker stop && service docker start`
+
+**Pros:**
+
+ - easy to configure
+
+**Cons:**
+
+ - very insecure
+ - you have to configure every docker daemon that wants to access your registry
+
+### 3. use a self-signed certificate and configure docker to trust it
+
+Alternatively, you can generate your own certificate:
+
+```
+mkdir -p certs && openssl req \
+  -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \
+  -x509 -days 365 -out certs/domain.crt
+```
+
+Be sure to use the name `myregistrydomain.com` as a CN.
+
+Now go to solution 1 above and stop and restart your registry.
+
+Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt` (don't forget to restart docker after doing so).
+
+**Pros:**
+
+ - more secure than solution 2
+
+**Cons:**
+
+ - you have to configure every docker daemon that wants to access your registry
+
+## Using Compose
+
+It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate managing your Registry configuration.
+
+Here is a simple `docker-compose.yml` that sets up your registry exactly as above, with TLS enabled.
+ +``` +registry: + restart: always + image: registry:2 + ports: + - 5000:5000 + environment: + REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt + REGISTRY_HTTP_TLS_KEY: /certs/domain.key + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /var/lib/registry + volumes: + - /path/registry-data:/var/lib/registry + - /path/certs:/certs +``` + +You can then start your registry with a simple + + $ docker-compose up -d + + +## Next + +You are now ready to explore [the registry configuration](configuration.md) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md new file mode 100644 index 00000000..fbe502cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md @@ -0,0 +1,70 @@ + + +# Glossary + +This page contains definitions for distribution related terms. + +
+
+**Blob**
+
+A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest"). Layers are a good example of "blobs".
+
+**Image**
+
+An image is a named set of immutable data from which a Docker container can be created. An image is represented by a JSON file called a manifest, and is conceptually a set of layers. Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.
+
+**Layer**
+
+A layer is a tar archive bundling partial content from a filesystem. Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.
+
+**Manifest**
+
+A manifest is the JSON representation of an image.
+
+**Namespace**
+
+A namespace is a collection of repositories with a common name prefix. The namespace with an empty prefix is considered the Global Namespace.
+
+**Registry**
+
+A registry is a service that lets you store and deliver images.
+
+**Repository**
+
+A repository is a set of data containing all versions of a given image.
+
+**Scope**
+
+A scope is the portion of a namespace onto which a given authorization token is granted.
+
+**Tag**
+
+A tag is conceptually a "version" of a named image. Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md new file mode 100644 index 00000000..8deb6a14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md @@ -0,0 +1,24 @@ + + +# Getting help + +If you need help, or just want to chat, you can reach us: + +- on irc: `#docker-distribution` on freenode +- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at ) + +If you want to report a bug: + +- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md) +- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues) + +You can also find out more about the Docker's project [Getting Help resources](https://docs.docker.com/project/get-help). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy new file mode 100644 index 00000000..5ecf4c3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":630,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":290,"y":83},"max":{"x":736.5,"y":630}},"objects":[{"x":699.0,"y":246.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-30.0,-12.0],[-30.0,59.5],[33.0,59.5],[33.0,131.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":632.0,"y":243.0,"rotation":0.0,"id":165,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-25.0,-11.0],[-25.0,64.5],[-88.0,64.5],[-88.0,140.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":512.0,"y":203.0,"rotation":0.0,"id":161,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-19.0,-3.0],[79.12746812182615,-3.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":589.9999999999999,"y":167.5,"rotation":0.0,"id":143,"width":101.11111111111111,"height":65.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.rectangle","order":2,"lockAspec
tRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png new file mode 100644 index 0000000000000000000000000000000000000000..09de8d2376d6f986374fceeb1e26389d3ab604df GIT binary patch literal 37836 zcmeEuWmHvN6fJQn>F!3lk#1=P6p@k;xP%DO4boh?q$LFv6_D>&l_)yJI41Ny!V`a_St)_x#pbfggsSP!oEjw4*>xITSZy^83F?0DgpwMGCC^w z%gYy6gWx}iPS2DcB77g9+C)H*K~RyGeeRC9lYx<~HFDGGo46)xRj+0{oGm+?tLmE? 
zIgcD8ha(6Fk8h5v+Rbh5QIQ!Q8co+9SJ)=c?#V}Q#a@o`a5F#u?vlV&tWsCYruBXC zR=~o;z@9M|fxAE6v@cplilVpEi3{U_NybHS<=jk+3SOF0ckuXlVcC`4SwGbCFY;5( zkHv%0)bO4t9g1V!j@p=;IHtwG87Td3myxBXi?UFACgpTOe}SD{mzP1C-gR0~1e9Td zy>nD}U@%=sd`iWi$>i@-mnMGoxSGI$~)CyS-u>a^v1^aI;U&pW$hU4 zy_u6mk{C->k0B}W-nw&yBXr%G zX;7DMdN22XL18gYcKx%L%^9Qgmav4LR#4z=(%nc5izo{V-XZF*S%I4XyPA_n`r-C||^+L@io zs>w(ZNC~e83MA<5y5CrFoJA|^7`j>}#n^~BjuOayiF9GxclIqSbP7<}p8L!(t9P8S zF0pGlKGbskyA7V<5E8;+Q&aD+udh+&Me(Ot(*G9XirD*mf4 zd+2x?MO5iFO)gQ_#-ZZMw$Svltpkoem(5fyyYK$&+zRWn)VD$i;%41hByiq*&(1e~ zCftV8=w{C8a?7xMA!~w*X_}NLHHeq?e7|AHkR>k>L^5IwYd2VD+Pu@NOLIzDF3FeJ zMj{dOyBBplnhR4boP00CQETJU*n3tc|#P>zzTI>nJT>6P#`Wq2n_^Ufpd zYlD$HTe)-MqL8{&E?~AI#%rb!V#!mjuxgi*_9A(-_Ue|yQ(y~pKEl|%uBv=F|h@zUdFtOS{FT zk4jO|#->Q-?8s#^-PB=T6WM|(n`tTlt!tZqOv|2_17-+GNEp*hVq=~*qP8#o7(Vd?}4^kEZZ~hDlpH1jqfzrsa>EM-mf5HVX zIy=)gVj=|N+!L@Q=HwiR7V!nYAj`VY@IV5liDjV^IvA9+`}x)0Swf!7&CVNY7^&S0 zNnpLztz#!N+B5LsdD;MCZCXTST|%N{CTxQcfu%(Tv~We}qWsyJ2oY0j?~42($;Qp% z-PYT|ein{ztdsdsa&R9esa~Bdb)tc9BJOoZ?d4)zsYM3Dl)l_A4h9B!rO1<)(>Xa# z7liFWJ8OOZmSaA*XTk@t>#VTUYu8Dp6rgWk%EY0%2TMO@t6thl>{hZ<-TnI{@3@74 zSdQREpvYb5XkY>{zP%~am;yrtVL%uaeM zb`gr1x2nvu?00^5`5ch*y|@7y!ciWgGFc$QV>YI^5(AZT;g6*Eab$JxECSa$1Vo^0 ziQW?(@iagIij3 zy`IX`z4PCWyk283cw=(7Lxn6f`k;L^gtdh+&~C|pn zKD;0p*u)XC$_K)A#`t!taebHMq_BP79gmpJ@U11;NY1j5xT+~h*e77ec(CsOyA;sf z6m>*K$_*^GPsna1Pi_lgpXq+@;FfEy%fmB2^wdJf&h{R*)m#8$wzK2Y>A6BnWgoN0 zR*W~nO)iqv7FK5>GgxwCBlveX3|_tVwg3v{b>Gh_^ca(qSMTY1JP^3u$Q3J?1@mH7 z#%g1p$@=Z^oi3|WISM2z`Wd|I~ba~^hwivhqzFTO)k<#;zU5&SO&WPRheP18?Pc?VimLs zJ1L)?7FwL^ey*tBYoPkBc4x~ZxP0_=w;jmhu@P&XGqM53I|Am95>xpkj5P>z@iCQc zGe0%U4DW>&$S}CkO~y!Y2YW5`e7=KSHO@~ z>&%a~pfVt{3{U4DZ_Y{&jzsczHJJB<<~9z!hVT}LH7VtE){+wC^eHe~Gr_ywm-Wo|o7}%R9j0|uqP&64%!dYPiZAL> z%BbAwr%F2szI>8b=;a8nd0$b(7a`3D|L@3%3@rGN%*m(E|2y=WFJDdY4Z{Zw@CRD3 z70~k%nKq!|pIL6i^X7ktDiG$F%8q2Xz_0&vGDckVa!q#JX8Y3r9a5MN7TVLwVU+v# z>RPath0vZOmW5v9TK_xrnFRE?DscP%y;iVYz6x)?TARvTO=iV^E+1@1^#{S=$f3jvaFd^fG-ChvM*cs(q|ImOo;Fm6+R$rzDDY2FR!ycx$|Usv E0UWExzyJUM literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg new file mode 100644 index 00000000..6c3d680b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg @@ -0,0 +1 @@ +Registry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . 
.RemoteEndpoint_1RemoteEndpoint_N \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.gliffy b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.gliffy new file mode 100644 index 00000000..f4250410 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":358,"height":310,"nodeIndex":182,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":20.000000000000004,"y":10},"max":{"x":357.50000000000006,"y":310}},"objects":[{"x":254.50000000000006,"y":246.0,"rotation":0.0,"id":179,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":179,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":70,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":72,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.2575759508250144,3.0],[-2.2575759508250144,13.5],[-50.125,13.5],[-50.125,24.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":225.50000000000006,"y":117.0,"rotation":270.0,"id":177,"width":220.0,"height":44.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":4.4,"y":0.0,"rotation":0.0,"id":178,"width":211.19999999999987,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Notifications

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":-23.999999999999886,"y":117.1999999999999,"rotation":270.0,"id":175,"width":220.0,"height":44.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":4.4,"y":0.0,"rotation":0.0,"id":176,"width":211.19999999999987,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Authentication & Authorization

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":-67.99999999999999,"y":117.20000000000005,"rotation":270.0,"id":173,"width":220.0,"height":43.99999999999999,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":4.4,"y":0.0,"rotation":0.0,"id":174,"width":211.19999999999993,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Registry Service API V2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":117.31462811656212,"y":201.0,"rotation":0.0,"id":140,"width":77.5,"height":30.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.document","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.document.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.192307692307692,"y":0.0,"rotation":0.0,"id":142,"width":75.1153846153846,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Logging

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":192.31462811656212,"y":201.0,"rotation":0.0,"id":136,"width":88.93537188343794,"height":29.999999999999996,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.form","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.form.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.3682364905144297,"y":0.0,"rotation":0.0,"id":138,"width":86.19889890240907,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Reporting

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":52.50000000000007,"y":10.0,"rotation":0.0,"id":109,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":79.81462811656212,"y":55.0,"rotation":0.0,"id":108,"width":201.43537188343794,"height":124.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":11,"lockAspectRatio":false,"lockShape":false,"children":[{"x":92.5,"y":54.0,"rotation":0.0,"id":102,"width":86.43537188343794,"height":30.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":9,"lockAspectRatio":false,"lockShape":false,"children":[{"x":22.5,"y":8.0,"rotation":0.0,"id":97,"width":45.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":57.5,"y":0.0,"rotation":0.0,"id":95,"width":28.935371883437952,"height":30.0,"uid":"com.gliffy.shape.aws.aws_v1.non_service_specific.disk","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.aws.non_service_specific.disk","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":96,"width":52.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

image_n

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":28.935371883437952,"height":30.0,"uid":"com.gliffy.shape.aws.aws_v1.non_service_specific.disk","order":4,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.aws.non_service_specific.disk","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":92,"width":51.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

image_1

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"children":[]}]}]},{"x":43.93537188343794,"y":24.0,"rotation":0.0,"id":85,"width":157.5,"height":100.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.multiple_documents","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.multiple_documents.flowchart_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":0.0,"y":0.0,"rotation":0.0,"id":103,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repositories

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}]},{"x":127.50000000000006,"y":270.0,"rotation":0.0,"id":72,"width":153.75,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.cylinder","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cylinder.basic_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":4.1000000000000005,"y":0.0,"rotation":0.0,"id":74,"width":145.54999999999998,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Storage

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":103.75000000000006,"y":29.0,"rotation":0.0,"id":70,"width":210.0,"height":220.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.aws.aws_v1.non_service_specific":{"fill":"#FFFFFF","stroke":"#333333","strokeWidth":2},"com.gliffy.shape.flowchart.flowchart_v1.default":{"fill":"#FFFFFF","stroke":"#333333","strokeWidth":2},"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#003366"}},"lineStyles":{"global":{}},"textStyles":{"global":{"color":"#000000","bold":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.aws.aws_v1.app_services","com.gliffy.libraries.aws.aws_v1.compute_and_networking","com.gliffy.libraries.aws.aws_v1.database","com.gliffy.libraries.aws.aws_v1.deployment_and_management","com.gliffy.libraries.aws.aws_v1.non_service_specific","com.gliffy.libraries.aws.aws_v1.on_demand_workforce","com.gliffy.libraries.aws.aws_v1.sdks","com.gliffy.libraries.aws.aws_v1.storage_and_content_delivery","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.png b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.png new file mode 100644 index 0000000000000000000000000000000000000000..e645df1e3a06645af3cf072844657e42609fa125 GIT binary patch literal 24298 zcmdS>^;=Z$_dSje-JsHv(j`cXbV-9Ch_ti_O2@#^3L+v%N_UAgNQd;$DJeO04>^GF zx$*IOKdwYp;F6o@pu(;orxHKp;da$_hFV2pSapsN!OS-<Rx-o68sH)$_Uds4V=1YuSO)kz89xpgBHYWV z+P)X<_K&ir>$hLYpyjY2=IEh3TzoSbi*fs>pvJ*}Ul4oNLHjIFjzpyBcqx6gnmBHX~BQfxiONUZ# z`vR`eaA+B&ug|l23@Q}3Ui^67YwWf3!KItdvu9_za)9k%-e*5?f8Ou%?a_Yqq$&5r zcRfL9_ZK?*=2_=%x?G>LJpu3I*L8nSXLG$zwp>uF32LFlbp7kA31%Xz1;H`sEp#qD z4_yA9O!krM8$XD+FC0y(C81sKw#oFT-aTgJi@0W*--TVFSxYnQ?2qSYjtXhr*7u?u z7z!!yZtihq;AdJfozp4H$g02NgkYg$_Q2|rU;KFaz9k8bk*IQm$1WkH<@#cKG*2V>#4ng*Tsn?hUj*{Ge(gP} zW|U#|8|o9S7Qeq%tpR9j#|NF1W<4M8G9wmlu07#va5ec}`ER5sXb_b-7zZStPJEmLDmGR|bDp;<`R)vx2(ZZ_$`d!DiD+<7wFanarsq 
znSw2JyLXqsc9$}~PMEI8LTO3=)y-2+WYUs6iLG-|)LFlwd0X}MYei|@S`yJ#qR87M z2{6e5epl!B{zxteJI=!}qNLYTTt8z*e(Gcs`K+D&6uy@1_)b^u1-ql{bs!G?qYSN! z&HQvLuxJT_LlR>JZ>Jzp%V#t8rMRy8D$j{Kg27zerMD$O8b@a($##y`yW3-&Pnvk9 z8bep(kmoCreRlb&Y%y#a(G(92shs|TgFrMj@3j))0xsFtu-|N-i+r=3Ht? zp*jLihNN#CbcIsLrpOA|$$Qw1R;PT>r^19t=x7`po!{3L<#6IG{{n{wH?mSMj>oLI zQQ>|~US!nYrBH_6-Z<_EH@~gI{QG)@Z^)4t6#+tT$i~H$C)-1a1e-Llp)BuyI6S+| zu*5-p-GbMwr}^9$Ce9C`m-MC*RCPnb86CCo#u++z1lt_+M?ui(yuA-ees#?Hy_oP} zF1_zmZ;BNL6}jJ#{xEprzGdadOrj&1p>bc(<}-iYk7t=d$2-$w#)Ihd#!*@MEjQPf z5i?j}E&8RVtaGj3^BfnN^Jo)_&*oes#W)EWq~-!(S@j`uVQXm)3gg8pw`Vr}_RK~G zQsQ5quaIC==@q?*b6M?8d{SN}n;QK73swY{i;Ulb@7bRWlW~dhayVPEa;|FJ?@M2B zI@WwxE)WW{DO}IC%kDOw&e(GvK{pFtWPvDS)d2?bc}nkP;(C7~^W{$EkhcP}xg|8% zZ$bafLUWS@c&5B3JPu{Q{{>uy+hrZ03Qf`eMPML9hnG589?rQW(sk7V`M_{UP zdq~~DA!SG{f2R6haPn$gf+4lsr8@S2mTEircLwVX7wn z??1B>E6^kM!QKg?BJn|T2zehem)=gE@(>#Bx#Us5;8U(sR_c65;Y->^ne~;J`4na3 z2XwhpMCP%LG-q{=3u7;c{*>BLl>B$&@gI=Sw2z+3$$(BQ?CZlkAbm3aJcy;`Y~H8uGqs>q-@_WWBr9;+oiT9Rp8syUkgvrc zwwV)?V^C>hbMG&*k8d@ zP-%h9p+tsA_gcndu4aZi{+zb4@$dt2U8m^9L)2u;HEX2QMFG#tGB7K_?z4r@*_T{f z(Qv+v-AOOE?Cu45o=Gj9=PQUdR?m!14W`Se8~&axEL8b`1k*cL=bCF==Mu>M7aU>) zKpHWUr&G-TKesJLT!V<+yPocEioT{C=(AUs?@&1)@)Z*W@+CFARwLMp~M zF1={SW>nXB-&qPFSY;`@(VVc>@P@s5HSe>%w=*l6t8=bH!uQU>GduR)f)oM!-#lX! zG#3DBNQnUV6P5W$M)GO_q#*&@L0S~}jrD48+*%Ii z6iC%0PalSmkJ*FJwT%P`D1T&6sk zdW3ec-*RKhTff0nsr^W4$_u1+hZ(bax58--oocNc;m**18y}kUYrCXjP#MOvtRebG zh`n+YOX&Gs%$^MNz&&FYN^i6l@x5A0$qLAJrhI6x$Lw47f*yz}i#;TPVE^J|o2K>l zutcyBFJ`|$yPCFQSO4~EPvx<5<_ip1otoHASvw)s+17*_z5kVS<=wKv@dE93gd#gh zPvza0?nJ1?Kj6&1osrQgHL5B3mm~m&T*H0#ZB1f5 zPfxm28=eibvSvZppxAm!>~1CX>rW&gL^I*+{l@+r;TT?pyp63}emSv&KTB7a!MOrCXZR5pO(VAzD3&amhx9CD< zj1rNeT~cPBjd@|gy6z{?aCLI0pZDS8jAq$Mh~Wn_l*}mkC38sA2f1o zvTjx)87i`dB$lt36~&lUz5ZHdH`e9A1Tc4zn87n*yfmUvaSV|=x{t%y@c+^YHA|t<$^mKYlk?+hfu=vl>iHBgwxQ zl9G3OG>0z59=K{N#Lv(tqFN<9RS}h>f-MqBZhiV>GeY znk>p^VLxwV``$>c%6$9p)UMm>oMUUSKs6b|*kX-&M~T;hUMAJDEF9MlXIaN#eE1Ld z2x`@hpL`u=wdCrNL{A7bzBXSNP}AU*G>Hjn=!=HX;8F2XrroqURboPtK zFWS%W@q~#>CABwen|~fFmiA?hRP+YQfhUG3$fhM^d+{k)%eVPFC~@({fgNBaiM-LX zi%GY=emTd|($())ex4;d<{{kTlJV@nQ7GN-!o1tU4v``*J||w+mx^&5?$D$UHGaHZ z5ypPwrF8riHnttiRxB@e$NIma9L5ECw`0Fyo1@_~^YyvYkM%mM9&Kkj<(Z(fe}Fy4 zRbePJG+<1qZQt&bC=PBLfr<$iehwYOLvaU5$b!}0aKLTM;WYn(}jf$yVK44?Ny zjdQ{7%=-Z|0ClaD(y63vC`o*AX_E2CoD7n;iQBMo9m%A$~4{-R9^p_I{rdZS>6LcY!0W=v$N_Jg9sXjRau)isxi$O9RL2l9yJ(aQtBf7P;ULePP-nuh3zx#8Rx9H1x#q8ksLmq-n%Yu(m zA#L-tvo5?3p`6|$ij>rzXvq#0DmkoB8T$J5!BqRUznK+FxqHafJ}f=W%%Dc$?OvBu zyK<(bfKuEyD67!hV>GXcE<-P<(D)=80yCx)dNZyJIQ+VVK1>bC;D1$)hC&^!Cx`2| z()o+0$VlB>9R=Y)$F1XmOgM>;AA)gum=Xlr3*`~%MvM!iFnQs{wtDeSEnzKl7qt z9jqu_Qq1SQKpYrU?$?GM_NwWR^LNv_exCjqEt(Fv!9ZgWQH&`Ts%$CGX;wdKXkltq zF5^?qeiE#Fd=gR37+FQ!!=9|l)w6D|P@QV*)H?YDj`enPOIM{C!isVWPJ-_@v-1&W zfCc;R>3bggoq_lGJ4-sef4DJ>)+y1`+*JoK&G~*=GfGte5}QGGe{uWxv+o*;&rrJ+ zQ)GxH5+w&NH}F+K71?HpOj@5huMx;@)|P@dr?ZYUG~W7uMPi1|5S^dc;z%pr{+!O^ zQ0MTys3rC}w7>i%w&;_J={PN!9|Y@=O`FUD0Kb-B#kl%ST*f8+qgMeipyXv<+G+{^ z^(qXHkha^Msb)1e$bb&Z;!unGCm<@ObfZ0&V2yqC{S9Se`HT$CQA&TEf$V~RlLlQ& zd>ZXIS6dF%=p|@&Fx#6BODKX9WLPYRC8)!u4-UT?Zx9h6CQ9bJdni5V%(1^~M@2sF zVXj*lgoKl$JKj;o9G0nXS&cU$Tx?*W99Ey;mHo-vup_YYI{?Q>p|b645aii+b2PPV zl-}eBle*H?O2DgOPJ^9;@9&w>C~wEGYsJe3W2-DQ`+f*`kYMUv$YWT=G5)QvFoX!l zx>lgIH_u~thK=B!BzvTV*I`nG=Df8fezTB}Et%LN)64U%NLA$B1O)X4V5q+(1XR_Z z=WE6RL2wF6e~!&4!p!^8vGK6$nUmJP>XZReZ8%Wv3mHO75L*xT z1~3N#Ws<#%nXI2<+`P;dpr+0M7lk~4Gk8pyZzn!;SMv9c6#<8-_ zpJjb1N1Z|-F$GKf7FJ^Lts1d+MSW@)xT;=xcMfowfU}uNG(gaA5{L?cGRxMgk}UGJ zKkq#QbdRJ~%SnUxxSr=R7IgA!xGnVdIV^8+A~c{$K9u-r_uF9a?ED4)o4*5|=Bi!R 
z8gc=tV>qRzXls&Zb7OredQoR>Pm5W!K>i;}8R13Ovny7d@zlt2*Ra4Db|!t+F(EO8 zC8TD_arcLl*yr2`4drv^>X;#{NI|nFhD?MYY1s~*Oa(Uj%3fR@4XK4wJbW)ReRZ=O z&PQ>)pzSLB`R2Uobf&K$;okEUvqr8S+vef$-1kASHD+q@s8B^fMg_}tNS|9|?DS8c zx7=$+dp%QS$JUB>y<+0qoxhbp(8NnHXuVuFyd&__Py|L?+b?D|zOKq1DXZAzeS$f; zPRgzs`^RH;029|mgV(;8aY8u$weblM(<0ZjaxBc~>3uQoytz*=uaP7KcmMG`-t*te zsY_5Mxwa`33ps-0=i2D1&N6h7J18@0`-Y~?&UQL+XVd5b)D7lQdb6KY-B{{v?PKpVu2E%Hr1d}b5><}WgA7jYrui&w0Nq{7s`P5tKp<0J1Ee8_1 zkr_&c5KMh_?)OVny!nmDAGa~}@ce;&quvf)`lRwzEp-6XzmgI68AtzH48?kAL>IUw^zC&MeS23cY&4}1GsA+5Qo@nubouDLRNV&zm*Fcv|ms4 z6sbDfp40-@CA4%kEio3Ol!}i%!IJr=;de5jj|BQs58$D1c+H?aigNYKUP@-s?dUc$ zc>hjVUx#a^80mH6=>-{dYjbB`Zfmcx(rw2pXdG2Kl&L;5s6W5od}-RH<)uTxYZ&Vu zvpYloP>FtoZ5x?L=9cK1SoQqHym5d;>7Mq$6=0c9MI(9BJo#Vge!wSBQT8;j$HGJ2 ziIAc>KvV?lmnfU=q~)jk)eFk|qTLbqE;h_nQ!a4v!ZIq-|lq>BEa!vH&6~eLp zZJGVFuiv0D$c9gtfv;dlB0F5j`S{(Iy7DJ{sq+JQC}KpLwJlhUM&theL)%nnh^Tp? zkfHHfirUS+zBP12p+6Ahx^%s>BSZuS4k@r5RHZs?XGjvNUz9OA;)RBN<3jtkq1S%8 z_3cyU)ZS2v*w|d{YzNzFeZy?MF=Pw`uiGOK%oFvMM2f|AeSjod*eOj=Q(EKn?TnDq zwXrSi<}V2%@Gf(cNM$#DgW8bmA6!a9`;4qQk}J|0X2g z=;2Bl6`y01JoyUsOW(b)_N4$k+uEgX*nCZ4TO8zpW|s@Q8&YN1oqBHYV##p!9e2WwSxd?UL)EfT=pQyvZ-Nf* zP_K*Et%SK8{2CDCrOXNFOCiwlkCJGXolkl;@4a0-57S{iZMQBaXX#nV_&mEcY3BdO zAU*XHS{-^u$InOk5q&UPfc0 z71xWX8i~a;OhHxsqB@1NdR$$QgmQKfBbH-!Y`i}UMVy7U7FzrfGE#Ax^uAXIOYLf= z9X1up(MRe~550P#h^JPBh(U$Z5MQC3B`pz(8`mA>?|6ag@JIDm^i*m6QIMNNMv_$yxH|9u-QwahGp#Lda&O>VC!(i zPEf?rFKT?wh9*(uFP*{VkaPvOFqN$N;2doZB@nXh4NXV>OmE7P0o{yYO5(bq#Zq#8u!|KW8 zg0k8?Ubp8XK7Zdl3oH~6_Kl__hQ}37tp1-D0B%Vs4TMzYj-_vp#OU4gu0GTCR9c$4 z4hXyKR!`nHUE*HO3g;EzQ_iNk=5v;r{+tFwsvI$WdCyV@aGuSRwe3~U=~!cmlz;J= zyfQ8A__Mm-?nxDF=l2N@)N)I=un}r4*07z(ilTgI%Huij9!cvz=ZxGvTj)LB+%#b{ z_R;Xq=IYm%qnoZ-xXFD>oSDC;Dc<`&OiLwl?@x|-aFc3p*g#KvrTcbMv^N&^T z!r~h;VkfaT=Vr(_5K8&(1J3d4-XP9M3#oWp9z2Gf#$RMHTFu44KDV02&OL(Y^MEFt z8DkDhx775{BT)xBPdma?NuQsIkg3y{169wf!|px+zV`)A^L`M#0w63QY+cTkE97v? z^|WC>9^OaybHUWGMyh|KDsPNZmyf} zt^CbEK)@rX;S;;cbg!$!iq~+9Lf!JLl=;;vBBYtuA*+kwx~72ZZ-*nf${$sLt@J?G zkCkU_*r<)yjM;W$<3uSLBl#Zknv%AA$5V9jj``L8s1Cw}UbP<7ErrnJ094CHbqeYd z?7Ooyq9szO7BwW6jB#y6I1q&UB;47CAFufqyf!v#Wj!7GV48rnE^ui4T2uIEyOI>g z(Rz#7TG?cYiIE|l_s8;VEiB|?etCW0R!})vvFyrAm7_eZZ}oER*EjsRV_$c;_P{P# zd^P7wh0`HZp>|UU?O5|4Y?K&LidWm=)0!45_^80PeCoty(9zh>L>24=u>=)y6ee`0 ztr*_}nf)|EU@t_QmBDk)`Fjc6X7~q7Bzv!v?*&yU zD0xD0!5dsN+{H^(-hj=D#VkyuzUiNZmyxk?2h8L36PZC;$*x12ygsa0It)_FvX*ay zBLuh7-3n)?ZQ=6~_jCK!cF#ls=szT7%PTJRZTMN?eNu{?s3;YMyylI&kvw<;_Dzh{ z+7HiFpc!yhHk)is#1Z(Mss3;F5i|&AqX-uN&D?}|p})8Gg#=!clzn`Y--dRuk5%<- z2JDXFOYQV=^~N%SUo#70o_OrMrnpCvUMt!fNd)S`Qd(_lDES#RU&GzzeiD@>jndgY92dUE^XP+3w=KAUj-yCJEEKOF)f#Y z7M#4?qIZAlD{lNW20~hMH_N-LM6Ds`mkd~+J{)}Mx?J_Qn!FNzW{w+TT;CQC9V0z`9&mmN$iBZ zpPCv2yz3n}oPQE5xc8z?*O0@BYyMYrg>ClHY-NvNzbk1)~S5V_WUj2Vo?1 zPsSzU)if!S_Iq)%%KwZg-Ii5Z7JP4-pegN^jb?rZtos9rmh-rnDxq{UI$!pOA1MEB zg@R+g_;l3j7z_DL?g+ibd8MlhgO~3sDVC_8lEwyCKs`9_=+GYo#1aQGmZU>P`fPm< zLTQ<7Q{CgsHCFGom6^8_o;R>8Kw>{pTpWO+9ksjWH$FE&MEg391}vBD=AFAukZSiY zPdmz|-cFiW#e5=61poQZy-XHt*R2J3b6*bJjL6~v_Wk}>oh;HGbs(ZVnatsBYRY_O_QLi=88;#+gM*`yU1yenk18b(zUZ;l?O@zG0NA^&J#? 
zR^Hd8O95qflTdl{R^N#0@%J6ox0Zhl#n!%^j;^(*ocxPd6xV70MD+PYM9hRtN0Wjqm;}9!_ z$ohE@m4sJW=HK#edchHA+@{U}k15JTYa$jUakj@?Y;)9Q?XFjAqeHB9ki7q+I0>EH z=PEK0hSfh^&Fy+Clxqkmc%$%EMq5Ho=Dl=2&}ls}xG&J~;}Gx9bLj~TdfUqZ0f73cU0f(0h;8ns;co-ObJ9Vbt>v^=8bljwrO zb4ym&*|N;83)YI-$Cu9hBND@2oOfU3J<`z5~kTsC~U zELEWf;<#q7wIQtOs^E3}%|X-E2`E`2-XChZ=W_1|tdCC69eXZBO9UAM3=FQb*re0TcTc{l%RF76(y6UIN}; z#v$Xn)!&)OnOII8$NW@r?X5CezAL^XY9K#DOOCx8^K>p z6ovmVN^q7g(ktdVS{sUyH~iUOi0O#un&?XHSrUuhEK9GqQ<$IjX4!xdZYdgBO~geD zbCA5)$o{v8qxZk;SaPsyV)5({AjH0BW@k%Dj2|MMx5f*f*4yepE+oA1#xj%s?Wbn1 zly}N?IkZo9M2;5i_PZihFB#(V<9497f!^4gsSxIyt+DQgVy`Flf~dcf6Gf{tff%cQ z&!nTCfNJ#%BS*TPH3o42?@bx~uTY^WV@(Zi8&1Y-12(CX)`Ql6UhBv%FlwF5QKtcO zp@8R}v88qJPxGa=4{PGpF=xUqXdJr(6%AQ6@aGjEWD$x)3B@mGj{&-4Df4`G5q(q#j!G;7@8;~c zkt6)868+|mvj8~;03dbEd@Wa_=fuAzB^$i6O6_>w^8#JVp}Q$3<$nZqr|r=X$2^Vn z2V=JC2@8HV?FykxOE<62{%osNVvs}!6F+%08tqem?aC?cdqJ2eVp;F}1!rTn3OH)u zvN-fCm>AAYgz8F4b}bG4bq=%Yya@zR>m_%>nt+DcwTtf!)!dZyId${nOJJEps=-aFk)W~{&oBYR%VgJJ{LK<&+)umrUtmOA0b^jy(yp~1Q z?dyQfA1O9DMwvBu6gxi=1qRjWcMUg9BQg(th9u7(OJ4v+fJKD7Go+>;a9uTaO}X~@ zW$EK>c9vHng~O1Z|3T$J8}({+x5tY>nK)?^?QxyN*jwz*11=m^t7Sj0`9|pd72&bK zcD6X?yTpAEFJ9hqoj`c)bIFy@g8i#X7VqQLXE=l)|FmV!9$SGTp;Az$0NSwP0wak@ zZA6!Dl9DzKty4fFnBp}>zwch?iT^~09G0Oz`QvFqvTKK}$LDQ7KA9LZkYnH$^SQziNl|1HUTb4l;PIaY;>`LRfb*Yarhqp@&_7nw(nLC zR?l<35<-X%PvSxDM4hW`q5hC0aT!QZ%2C}bG{ogz+FgPc3!J17{85oTssoxxsGbXx zS1|W*!++Cj$|kh3N>V*X@3V9F{U7L1J3%*Crauzz>i}&cu z@K1EgTa=nMeO~#YeyRKimn#HE--{Ch;ZYr~kP1#`+Mzh333Z5I zqSQamCXfm8@hOck?rArK_^zjyxz~1aO>fi#`TlKlQ>qm*d;;#YcuV(APY7KyIxh=E z90J{$Dqq`0&-i4(-E#Oc0_P2|^(7^mU3Ik!(^tmEL5u<-vA!5`CQnBEjEv1qFYxoC z8z@f~XM4Sr19@3ix8z>^cJ?P5{s=HESuIyDOJU^u9!Ew*jWOr>INbkCUT-eLnJiUd zS0)>^h$pL7OVZbcitkGazuzM$##q&C zMHtgx#*g94Xh=G|c6bUr$&GbBS!NK?SA#dx{g3N11RgKFC+*)1jv?SH-W0XRe5J%_ zVcDL-mn=0Zg-*WRr}`_!tD;Vqhb?No<0GZ9h`6T6;yv~!Yjk+(I#F6RE;?6VbZvt} zrqa#1!ecGPb>h@|6?zFriBGR`v5TGVH8g4x+;&J zDz`wywrDl|3e1%q$X;3;8f9&@e<^xMX{CumhiWnyPdPK8AwWIcF_GUgBysYX8ZQ+! 
zv?gzA!7+uoku_uP+J?adQ~8_}Vpp!WZ#Al6W*t`T-&DWq3JUs%V@)9N@m%}RK7C9C|0N`<*Gv8iqQj{ zO7--MXf_m9BPJLUAg5Gln`b;@WGfAlgwSMM9LHg zj`7GsM(jI{^MgWFCj5aCu0baQx!e-65ifvPa8D&|K`$Ci@;yH#;o#!{p>~BOv{<6o zgs)uQO9Ov!>R*NPc*G`pRkn>6An5(hiQzAEs;&N-VDE6ix8zDqoHjDPT8%()G*F{( z_V4zpF>gp^=>z21i3&*Z?{Siui0H5%VYVt+Te>50u>&N~G4mlJXvPW{7Y-Z}Hh9Yd z%;}6n*8C+B^j?c-q#U6{L}r?`6;JgeQ$_3p#X=B+tK7bs>4~Rz-AS%G$nHo=_-EtG zV=1IB`LY#G$;_v9Iafx%NdvVs0rBf(O1y9a5(|Zy)Nv(RMYT_$@V7-AO{SHGMoN)f zt-Hw5_QA;DH;wGniX2~SsMz;EJZR8sV)Rq66^cS++cdFJx?A^=)3_D~`_XE={9N>{ z^o)oHbfY}?I4F~how6=oVblV~95J8xzL_SjTk|}dB4hrCv@uId;<-h;JNU=w1@O2v=7lg5;Zy+;h!gzLCS7?RNsN1+~Zl4$B&`nD3 zs-p^13yggiNb+pm@py8hwpgLJkbwL{(OcJ_Znipu_vu-ls$wjnx#$WWr>fcSy}zfI zXntD$O(w0pB3j~17QXNCXiPD8XG^OOuzQhoCORQZPI^uPU&eb>_lRuVHZ0n|RKkC`3iBf_1bY<%K4YEz#?J&JYVGmhCW7~km2-|+_(lVU7=yaq&FWtF>xQj9Yr4TDJpIq zJuoZ}l%QdQRZCTtd06-|zj}oqzL?{I+_ZT+x=)+Q|G*N-pIFZP^yZsa|3F#qNyQ~L zBMUdW=z&~<4+h1963Ste%kjYRGFP2r^QH#Y>BbH}`HE%J4mnd#yLdo2s{MljaQBR= z8sL(CHgL(3@IGWrsNg-~Pp&B7tK(4YRbZ4G(0j3*7VfO2shpXD9`uqDE4Ok=hW=z4 z@N2bQsqX@hdRhNNhF|B=lpTW59tT8c3&-4@ef6%!B-(NKJ%xn?8;Ov|(UfCDp7Ah& z2E|Uf@)BB-#sT$_)R+7M=VE#wDkWDVeky>c$?a{l=sB&l%<&i&G^i z%clk!@}>eMNC=0WkG8&=khMJ6XslJB5pj4~aLXLoD7zrcl*P)Y&2G&oh`PhbZui#i zTQgAJPjG7NrKIU$|3Yt1d%|-j+vwNH1rL3~=fUWh*sDFh)za(j6uWs3iIqes;)$u; zw6PL1Az<8_;o_2t*}Os2%icOall=UttQ5z(5Cbj8i>P&zAfo*+a>}*Qm*VC@YB?MU zWHeEHLc=^l-h!#`zl|@&D9K7aAM6zOFAtLN4EnZ~rMiYr6M^xoQJcY*X0~mR?6zLZ z^gY@+v(6kBUzV4YZy`T2!*>)`w=TmUBoQ#U{RReS54l;nzT;j|zBueg#nt|G!+ zn``hgus+{gc+Daeup%mT`YTiGnw+_(ona5BJgr@p9&fQbfx=!+gF`SycB8JZVzyJ{ zy!mpE;FwHOZ>$+o5C!f)eb2NHDTU^bVj(yUvx(20SPAcz-QZc1cq!w|fR!Og54-nk zPHCY$&!X{s($9G7;ftv5x0sa0KQMBLyjN9sSrc{uRnOEd6{OLAYADOqI?vaE? zr9e-5$2?g29HJc|svhl8L>ks{hZ>G~$7no_@tq8xHH<}=M_A}hNVkcS*VHema3c|6D6zxU`5{}6Lj5ZeRIC8>TMz-pTr2X~ zhi+j74hvTo-`DrZTQRA=eTKgpvPs@~BgQ49gxA=Yh1L3@G{C1K=Zh-VS<3;{!62F2 z*dgSZjr@|?hVmR0hg!137R2w4`)zXJqNW$g`?S}^EY?bYwC5H%G8!SfBSUZYi0+Bo zqf!d~OqQj{_xW4360M~~qrM`)XbSp7A$Qw|N<$uhUB;8-RdJ5~;PaB<0io|$^1HU& zOG)x642giIxT~zEuJuwM4#i=ZQz#a6aUHRYPb?;eDM7}g;h%6bY}!@khV4j`&NC{) zgIOZ&2_*d3E@HaH*-0|)EGutmNrg}M?1jyz>gPbjW4>hIH#T?3-eLTt5Xn9LZ$$`i zx)I$r^p#veL^7UH1@hJ>?V?T72RwSzmrNAN7acgdkQhBd%rL^$@HDwX9EhTenr=E| zCxCsC9=j}|OZ<15FH1MxEE|jWhDL4$H=BwSxmhbay|smi%3~cL)Rw@F1v}ix2JMb= z+hZIHQETK1dkDg1WQj(oE=u-8M=DywlWwCF>bu-Wp!oCYbFe^y%C`~+J3bq7#7(zY zjs-@wyFVP^1*X1?uZ!AE0e#tX2p)$4!XD7-cRlcdvLC~D#RPd1&GW>Vv5=kT; zVbBr?$9YHxTSaQ#+;%3cC6=77Oe4Fy3J2C6n0*)d#oP$H7Y-5TKWRvEVgE@g9;HMQ zl5BeXN|}U~H0(`B$EWXKXh>p>8!hGp!9oE^Ctm^ve*KRU6e?6d_L7L z$;9r$*0dO&QCN9^#5)2fy$IE4e%7h*eKPS||7wBiRd~2L3)Hw7c?XSiWTKvt67qhU zJx}kkHnUedXqPnammm$6v=Zlck#+o8h569dj_s4SL4up?Hg^up8FI$>%8Zav%ZkHT zjPxSX2iIvcoUfT<^ZK@3PIIro#zbdFMr!2!=tUHcP}E*n2nKLZEo6kHUV$EscV8A1 zO4}MML3iU?`3qewdfqDn9SYNz&m}~xzYKH|pE_fnRkSu!$)u^hSf00NOXXUMO#jw~ zo59QJ!ASBLLWoI14FtFHP}^>VNHRfsfkBbeVeeaUuKP5Jp}%l8>Y_RuM_EzxvBt0A zIXwBiY>wr@O+l9-m2F9(HupmrV^Df8Oc$O&&D`Q%Xn~XSw9F}*FAuk;pWx*rf=1|J z59#zg8@>G9Ihr$q=_K!&df*z-O+7T$K$-S`K=p= zHZ^KlI~cf;Gwl96avE*T!hSNHSog6rZ9y28;^=MSwXP#VJ~bOEtX)CGR}@)jnnw{_ z{kgC9Y59*^LwDL@$II^rqbH#0^kH2s=A+r4Z)v6v{4vQE(XoA;??u*OlhE(53cV~3 zXv7M(DqFuqP*!TA6YG-9k(x6lUkOwXYdpyAuuZS}*XzTdsD6w#0$4GrOpR8$lnc-a zs1{kCJh?VEj^-OinvA z>#Op{+wdd~L#%qIKe zif6(C8;c_&a3kG-z{8VR`R&;#@hd%^k`UKw#=h^AdSO@=*VSn1u(4=+&*tXMiHqK~ ze-7(;QwIaew$lf(`?ej;GV@Oz*<11Wmha^%w|?9YbIE1gr;Pq_>>d5bR6 z6Ea{azi=qY9@J6h<;OWO)ApZJn?4l=n4fAYrB@olMT9FOOchU!aqn)_EfKVEy=zYz znQ6=sEu`&<|F2ce0Y_-e1(@wi6Y0b~gbrezB(K1cJgzzKTmFWqH_+k#8RbdaZ>$9$jKv-LB9$l8u1jb++-~jQ zvo9iZQnZQ0i@u=SO3N8wIun-@Lwh$&yRrfD;go#5!siuRz&@G|`ZSgMZvF4UjVJ7p!F(x)vm2_)U686Gw|yE! 
zrkd|$j)`ZItXP!jfA@yUs#e7z$X2@*KahDzIC5*y=n%iZZ7cm43_G7C-h@Et6K~%O zP_q8PQKk6~M&x=@xA|nGIHsIpJ*L)cx- z2W4%@(K2KUj-F5*9IZO>tWkk}5^13jR$P;iLcx6JW2~wqGu5Y`* zK{K;Lpi~4)dhtb98YUT{v*Z0*@aAI%2F_7N#|dFjuP;&m_DJ^DP`Nrfp5B?NpTEXXX>|*0r~^Y@S9hrKjJ!}53s=cZhd#M z^LAAg+&lQl-0JU)d|g81*IS+T9bg4x(X%|ANC~tCRsR9j4(?ajv+*X690Rj!lN!? z1k~6Kyx<`D0BW1!9D+v{%%nMmNqLoFyF_s09wPEEHD_xY7d4BSO(6y^ni7+gl?}v638H{{EBz zu5z7b7_@G$ep~(XB&gEFdS&+4)Uz3d_0m`}zx>K~@~O02@2-{hZ;3`^QKl$Ck$7Cl_o~ab*)ERiB3;oD@CDfzXiM&0qGICcq!MvKsM;B(ZR?# z+@Jy9igRFfZcynkFbOvQ1w1BZOs#XkXBOnMYbAcBu4J$a5LNk90Bo6MsUnV%Rzi8H z;snl~E_90gz3Y@m%EjNp=T5{vD;Cy3#@jN^L$C1gN#mT1<28$NM3ywFU`&+bvV@O8 zqDPOWc|CFef(&sZi2UkdAGOiYrK!tO4f;oVZ~dZxp(l)85#cAv(xAXZMWn)j-$0bZ z1vF5t?(*yMQm8$nS9qBI-QGo%>HjW706~n{S~A~yHU4$=0(W%Hh(XiIZ=_Fu^)z^UI*8*YM7n%cTG9E<_!3W9V(F%*%e zG|53gdanXX5v3fYSB;=F1(9N?Qlu9F5fqi8D2RxmNmZI4N{}W30fBfB?%ba9jr-^O z^WAap9sY2PFp|CZT6?WI=ey?nWE(OV_)2reGfp^XSO*(_Z&*g=?(fGu*Ou}Xe3Gf) z{p7usJSV50P!wJ3l8gV&)Ym(mKA>>R8kb`Mqy4==xL)B1X*Q|(zejs@VGCT?T_D>x zOd8E-&$mRP6IGq=9JG=T@j#~<7^%}c{3+w|_<@Z0if%uO=i`LvD7Ui2scX*Ap(r0TXO^Yx zUqo>0r7!6lQa}!u6?YqH2~}nLGF~7a%S{e7OErzWw|b%q z$`X-m*9GksG(6@qoVI8R93jl+CaQXvFe^<2rB>NK(gY8vOz859`c>-dQU$m2whh;9 z%XFm6-7uGN^k=2MQlo_m9!-wxisNN*JTcyOseEyzA2DMG_O^vGY;AzOpGOTvYBMN+ zMk={WQx(A#iJ?m-xhJwDRk&q$3CCKwKAXl2O0)e{pb>49*BCg%xCQ-tZ0+vL9A7)?Y|E+0zOhe>hr@&$Ad_}1Oa z)^M&n1qE5vawdEYT(m2;>Qd+S)hzcs2|{6_D;Mu1oGx^`g+M=i_RF15yT9`3+4Tig(s#huQ1*%I;6uWf zL$sz<-Jv^&^Y(xMY%`g#-)Q#Mk!m8~XBehmwcl)G*_uxJ3)nNVRDVn>-xDg=kpvoL zJMjIta2+e*k-27g?}R1MG?poAS3vcXg1yUm_9QJ$7jc~20$R|*AD80)tWa|^uB5G` z{@tKoNsO96MVrkn1`-;D!v8w88-M-XjC|$O2X$ke5h;mD+Cep7G0!o+Z5UK^MF?X_ ztLxL`sKaBtmq4*2I-@atC|n8EYUZj}NyxE+lU|ymB?Cc}h-(NnClgIB|1K-4&Zhf} z)-P*(J!cRlQ|5oqq29+~&{s6*au-h5fGp9(FL(aBoDNoKiTz@VCC3HS&9g-Jynrh$ z3VI+wc9;DG6GRzb4Hlc9q^o@Vlr{ayL(=wtDvFu7QH>U!<63=~zk@EtCc$-XRx_@x z?X774!kh!n$gIz3m^Nqs%b+T)j%03n2H7n6S=6u2qbM?8bxrrLlR zgTPl_#`X_x48e(*2WldU>uT*nffiABLAS<>zc0BN=AHHF*CrFbX=unQ)c&D+7d~#aWifR65-yaHVC&pZ>@+tdPYzcISG~oUj8nXH3 z!_T;=CQLBpa1{Q>JsufBtncje8fs?GPjh{>qLl3mxBqbKo^QBmXj1Msf(kQgg@xBc z)_g`s>`SIZFghHoWP*6^=oHWJ5c6czwo&Ww=kQp|gSaaRrr`B+McOnDNApCIY|BMQ z3Lco`DKRG;IxVPoDGl67*%I-hSxl5THg91+$FS8m5?#97WRV67 zPzsFLc-FA3nDyINZpE7q@vSfiGqLVz>~qC`i#s48C~Hbfea(9Equ0KVXxm_853KL9 zx<#VRiQKTh#&4c#wB9mu`M(W?2H1-A(Kw5ni=s>n*`r3)ZSv&;wk_3PBQ@rKJS}hF zGrCsX2LO-@!Z-JJY76sn#l@eNw$m>kB=)IvtnlZccw3<7N92%0P;xRQ*{A~jly9IM zBcv{8mr@_T)*xj~kTSPVWwkU^EYTV}YHS}e;52sbv6jLGYCDeZ&*Ba-@udj>EB zOcdn}%`trw6y^geZop76+B6HM!TZShKLZ?FSb=vaT(91s04i?o`Is#u%3^9)OdMDj+xETBVKS3EyYHe!C|-d z&nUVJMd10=XIkT(M6VkCsG6Z`c2-JC5(w5(l(Bc{x}&J+3~42ww3Qrt&m~FErm(&u ztl-^vaEne!`FC+2JU($|)#ilw3ukG`@E1M4#?H}U*FtkXeQ3RVyRtg2{29*4Qda!W zukm?7OM6R|y0z1*SS7VXR~Ik ziLCErtcUl`S{K~a&^Tu1N+3a!D|^UVV1$^8J4g;}J#Fccxn_QtkWRKv>75vpF3;v5 z2P)Y!P+k%4r-b=AVmi64j76*hV1!; zj^SmE`JQyfk+2_@;}Rw;TkGtCX{Rcy&4Z22?|Vi`@9nwAN1_9I#bBV_Vd^@*Q(V`q zL%G->Ftf35u1Dy|lZ!LzSeA0pnv^he14^V@vzrA=TLfqn$UiyTxv@7}2rCbP8ogX} z3Kc*@q5m6dbgt0z&@^ZRzoAB3)!|D7rr**8K#ihrR68Wdz%>!5(e(43r?YZX|3HnH zIG;h12F}6XP^0Ca!+CH2A1>B=fCQM2Cs?=lP)vEw!6^=7RHK1QiSk3B2U5Wy^Ehsoj!DdMT zr%YE{9Q#ZQnzg%u<)~RE--=|%Ask~G%*ZOj5!`y)@I*io@KN7j01=fcfmXHRKEN^f zLzf5%FxY~{uTB$hFQ4=5MJ9r}mGr{k?>`zTuYlr9LaI2BE;$nB0j9V~V z#9`oI0rl|Q2`Y4hcK`wh2j}4_#Yra&+LQQ?fLC4Wi=*c-8^Wks(@-%J<(n!T1b6fd zl;;_cb?PIuJ3D~TKx6e;3aI5BP}YgUBc+QZDHrKoshZe_FcYx$1NhEJQI>)Y+sAH+ z1&ccfi>OPc`FM>!07ycqfb-G(d)X&hnYPz_!X70V4uF<323Fu9y4Ns4_N z>fML}T)+-g0`eEynM8!pD`5yAQDc)IN4g{9@2LJE$csN}syWYKf*!lXiJqbiRTe8`)9)D3}Q zj7E4~dORJWa0>suVLBnT2Jk>1wxWR3s1w3*QDR0t59TaiOvm%sjh@K&_iei1Y_-&> z`C9wrHuTxi8y~eCxofqs23Mqp1>$3t9za6AC3Y5P2-! 
zdh?E6mYxw#z1@5Lrx@>q%1j-2&gJz#UkW3MiH#LEm3=| z+nQWbZLJpZ?fXD5yFIRw7<NEE?{xUSW( z_OYy>wk%9xlq%l;v7C5I*Cb3&pcB8gyqCsL6zcs`ARX@9EgH3=H0cd zh#44_pIIduW99bhd7rSmYe$lN{2L7U*ymhK=yUFeLa3cMF-8;Trdsb0RdzUQPsiIZ zRhy)4&B}{l8`k*|LH3qmyXu7FWIH*~539Pl6G3$$?LT*eKiCZbY%56n{9mdCC!xi2 z?ei-x4?sakScg1vwGdFP#^&p~=~hTMdI5Fl@mFzgjl9@DvMmBpzDW%-EjEcSg^z}2 zpePZ~F;@b17oBz`8*Dq@)N%rc)K&D@w^$67eBM5L^-=E+s>6E)Zh0l#=!0kNj_-=A zAKlsLkTN5(`dWy?D)`InY#r8Drd>AL=u(#3(gVX|4<7Rm7z+&*_B>l9GrrgAs%$eH z!Dgs$eBpSCT{F;7TXgo9F2^o+x3*TbM1}peu{4u;AxUEX`qNrx^Ru3}28>psHkztp z)#5T#Di)6PY|HD7->4nk`{=a9gXZ~eZ0b=Lwncw_8&7@LUDv8_`h9hQT}SIP2_;xT zm-er?+fiXYTRhY=hG=T-KGWlchOTGayPieqlCx||_TqAyal771WTS2u4)W`KDFjuh zA%4&WO&Mi7lU;HqBh6!+p@>IM+oo=&cB$C2(M!-b{c9~YId8zkkg7Z{PI~^1X%i7M z@lqrn3OxnWfjvCN2+`!l!~P033_)yz!^aszZRLNiS!A!Fw%u<*WDRH1ECfCNVBOnl za}0GQ7WrIm-;F&Os@aIZa~-%5!2tODE%qSrGEO;%e&c}{9Zo_%k2VvBfxfZy$kPhG zBxLg+{n!^;O8E9_ER=gEd0K>f9~*0^^)i|E18!pL!hQyfKMkRE8mz7adFR&aY-%R~ zpg2(HeRgdVYHat2aG*f3y7Jw6f!wmL?ccW?r5v#hs0}ixZ|Oi}(5Y_t!oZGo1$Q6q z)M{|jZW!N_1CN87e{C@Sn{VPH)%K|l1Ilh5_AmN;!O(AV>|=hTwXOkRJIh(7x%%=s z1UaTTn+gkU0Rs8Jd|zSo_Re3+0}swBO*^l)^ZFjV3{m2be<@SAWsnMHK&Vy>egSlq zfl|Xm?0c`y^%YW(=3xFyAB7Om8vyV+O7A#c>;$@A8CmNhX+UYJ}$R=Tt+j0?blCkiI?zvv{N+h13-89vxK0|9sCZOIJ=55(V5Jj75jrH_8m`?1?~ z<35b8EaYOC`Y=B`4Ye?7T_Q|9ejyt{#f3}~+|iY`!>BgBVT6E4@8S=lqaLs-m|KdA z^A$WZtiX|+FoGNi4@sevnw!D#Tqe!7tqQ%&hB}-tn$tfZa^(EP^LnaxN6O)WCPE;Z zqANfs>CQhH{$jKxmxk92$#s+sKk8c$FU=NuGD~(m4|u5F^_GTw^4Geh$W!$C+@Lql zA6K3j<>Bp+CbdAhP!RjO097oBz_ZJBmeKV_yB2Fk?uF9u4R^%IufAi%@@0 zgrl;XPj0WALhm5r%16*l%Tyu+gIQBfUgRE91{-Y+mHFqjUq5UfM;`PmPsOA%f6k`^QCQ z&JyVwHa~@jqZgiDd@q0J>D*e}%{Ot8C(K4|bGIRXkw$ni2!A!<2f!>uMwSMpkJ|^o zw=V3C3_A{aHLLWUiI{(_UKD{Vr_)3r{tm#;%sL@=2#`~CA?;Uz-s=)*G0*|7Q|DK( zk!Jwi%<~Uo{Rc&_gC&O2jcw%ts8B0;*OQp!b+h|)xst1ld#B!(RD@CUG%S897j5J0L(jibA|WQc-xx=T#nayBY`f1 z3QA$6#`5HS8yS)JYf8Kf3p|P(0}Jo(_xR#lB{yJ$-r09w^zYaDqq>@tQTuf^4wgy* zuleN{n~rN^oa{ZyIAptlLgt?x^5Y|Zj(&h?Cn}gC2q>w5fZ_8&o~L0py#86e|MUIv ze|59{H$T>r{}Xl(ue=Y?fIkxE*IKHtGSwNW;Q4`TxKnHV%PAhPAbEjEUv2G4l|r&F z%;2q3QiK0=Lf1swbwfurDG~w#*>>4InsLqZ)QAvme5hxw zo}M++qfK&o=!dyobp8xYaC=nUVHK+Hb0M*CCv}H}!xapaM|>OGAZ$i2rkdz6zoPhI< z_)pm9!N1|?osq&wCVGIiI>Z?24&eNf7Jt)zft=h8sjjeq{ZjVt%b5KxOb`(8txPCD swebyO3cq2#P=^E}Q0@Oyi2E+9P*(uq3M-p6FziwK+9q1nn&+ba3w!&FBLDyZ literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.svg b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.svg new file mode 100644 index 00000000..723855a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.svg @@ -0,0 +1 @@ +Storageimage_1image_n. . .RepositoriesRegistryReportingLoggingDocker Registry Service API V2Authentication & AuthorizationNotifications \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md new file mode 100644 index 00000000..06a47842 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md @@ -0,0 +1,63 @@ + + + +# Docker Registry + +## What it is + +The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. +The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). 
+
+## Why use it
+
+You should use the Registry if you want to:
+
+ * tightly control where your images are being stored
+ * fully own your image distribution pipeline
+ * integrate image storage and distribution into your in-house development workflow
+
+## Alternatives
+
+Users looking for a zero-maintenance, ready-to-go solution are encouraged to head over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more).
+
+Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/).
+
+## Requirements
+
+The Registry is compatible with Docker engine **version 1.6.0 or higher**.
+If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry).
+
+## TL;DR
+
+```
+# Start your registry
+docker run -d -p 5000:5000 registry:2
+
+# Pull (or build) some image from the hub
+docker pull ubuntu
+
+# Tag the image so that it points to your registry
+docker tag ubuntu localhost:5000/myfirstimage
+
+# Push it
+docker push localhost:5000/myfirstimage
+
+# Pull it back
+docker pull localhost:5000/myfirstimage
+```
+
+## Where to go next
+
+Simple as that? Yes. Continue on to read the [overview of the registry](introduction.md).
+
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md
new file mode 100644
index 00000000..cd0a0a28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md
@@ -0,0 +1,59 @@
+
+# Understanding the Registry
+
+A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. For example, the image `distribution/registry`, with tags `2.0` and `latest`.
+
+Users interact with a registry by using docker push and pull commands. For example, `docker pull myregistry.com/stevvooe/batman:voice`.
+
+Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, and Ceph are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md).
+
+Since securing access to your hosted images is paramount, the Registry natively supports TLS. You can also enforce basic authentication through a proxy like Nginx.
+
+The Registry GitHub repository includes reference implementations for additional authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way.
+
+Finally, the Registry includes a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting. Reporting is mostly useful for large installations that want to collect metrics. Currently, New Relic and Bugsnag are supported.
+
+## Understanding image naming
+
+Image names as used in typical docker commands reflect their origin:
+
+ * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull registry-1.docker.io/library/ubuntu` command.
+ * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find that image.
+
+You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/).
+
+## Use cases
+
+Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available.
+
+It's also an essential component if you want to quickly deploy a new image over a large cluster of machines.
+
+Finally, it's the best way to distribute images inside an air-gapped environment.
+
+## Requirements
+
+You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking.
+
+Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. A strong understanding of http and overall network communications, plus familiarity with golang, are certainly useful as well.
+
+## Related information
+
+ - [Deploy a registry](deploying.md)
+ - [Configure a registry](configuration.md)
+ - [Authentication](authentication.md)
+ - [Working with notifications](notifications.md)
+ - [Registry API](spec/api.md)
+ - [Storage driver model](storagedrivers.md)
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md
new file mode 100644
index 00000000..5dbd766f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md
@@ -0,0 +1,30 @@
+
+# Migrating a 1.0 registry to 2.0
+
+TODO: This needs to be revised in light of Olivier's work
+
+A few thoughts here:
+
+There was no "1.0". There was an implementation of the Registry API V1, but only a version 0.9 of the service was released.
+The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool.
+One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry; a sketch of that workflow follows the steps below.
+
+-----
+
+The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process:
+
+1. Configure and test a 2.0 registry image in a sandbox environment.
+
+2. Back up your production image storage.
+
+   Your production image storage should reside on a volume or storage backend.
+   Make sure you have a backup of its contents.
+
+3. Stop your existing registry service.
+
+4. Restart your registry with your tested 2.0 image.
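+
+For the pull-and-push approach mentioned in the notes above, a minimal sketch follows. The registry hostnames are illustrative placeholders, not part of the original document:
+
+```
+# Pull the image from the old registry
+docker pull old-registry.example.com:5000/myteam/myimage:1.0
+
+# Re-tag it so that it points at the v2 registry
+docker tag old-registry.example.com:5000/myteam/myimage:1.0 new-registry.example.com:5000/myteam/myimage:1.0
+
+# Push it to the v2 registry
+docker push new-registry.example.com:5000/myteam/myimage:1.0
+```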
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md
new file mode 100644
index 00000000..78928401
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md
@@ -0,0 +1,62 @@
+# Registry as a pull through cache
+
+A v2 Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally.
+
+## Why?
+
+If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn't have it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the image fetch traffic on your local network.
+
+## How does it work?
+
+The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage.
+
+## What if the content changes on the Hub?
+
+When a pull is attempted with a tag, the Registry checks the remote to determine whether it has the latest version of the requested content. If it doesn't, it fetches the latest content and caches it.
+
+## What about my disk?
+
+In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching.
+
+To ensure the best performance and guarantee correctness, the Registry cache should be configured to use the `filesystem` driver for storage.
+
+## Running a Registry as a pull through cache
+
+The easiest way to run a registry as a pull through cache is to run the official Registry image configured as a pull through cache.
+
+Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property does not hold for a registry cache cluster.
+
+### Configuring the cache
+
+To configure a Registry to run as a pull through cache, a `proxy` section must be added to the config file.
+
+To access private images on the Docker Hub, a username and password can be supplied:
+
+```
+proxy:
+  remoteurl: https://registry-1.docker.io
+  username: [username]
+  password: [password]
+```
+
+## Configuring the Docker daemon
+
+You will need to pass the `--registry-mirror` option to your Docker daemon on startup:
+
+```
+docker --registry-mirror=https://<registry-mirror-host> -d
+```
+
+For example, if your mirror is serving on https://10.0.0.2:5000, you would run:
+
+```
+docker --registry-mirror=https://10.0.0.2:5000 -d
+```
+
+NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`, as sketched below.
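+
+A minimal sketch of such an entry, assuming a Debian/Ubuntu-style `/etc/default/docker` and the example mirror above:
+
+```
+# /etc/default/docker
+# Route image pulls through the local registry mirror
+DOCKER_OPTS="--registry-mirror=https://10.0.0.2:5000"
+```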
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml b/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml
new file mode 100644
index 00000000..07bab4ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml
@@ -0,0 +1,18 @@
+- ['registry/index.md', 'Reference', 'Docker Registry 2.0']
+- ['registry/introduction.md', 'Reference', '    ▪  Introduction' ]
+- ['registry/deploying.md', 'Reference', '    ▪  Deploy a registry' ]
+- ['registry/configuration.md', 'Reference', '    ▪  Configure a registry' ]
+- ['registry/authentication.md', 'Reference', '    ▪  Authentication' ]
+- ['registry/glossary.md', 'Reference', '    ▪  Glossary' ]
+- ['registry/help.md', 'Reference', '    ▪  Getting help' ]
+- ['registry/storagedrivers.md', 'Reference', '    ▪  Storage driver model' ]
+- ['registry/notifications.md', 'Reference', '    ▪  Work with notifications' ]
+- ['registry/spec/api.md', 'Reference', '    ▪  Registry Service API v2' ]
+
+- ['registry/spec/json.md', '**HIDDEN**']
+- ['registry/spec/auth/token.md', '**HIDDEN**']
+- ['registry/storage-drivers/azure.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/filesystem.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/inmemory.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/rados.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/s3.md','**HIDDEN**' ]
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md
new file mode 100644
index 00000000..0552f85c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md
@@ -0,0 +1,323 @@
+
+# Notifications
+
+The Registry supports sending webhook notifications in response to events
+happening within the registry. Notifications are sent in response to manifest
+pushes and pulls and layer pushes and pulls. These actions are serialized into
+events. The events are queued into a registry-internal broadcast system which
+queues and dispatches events to [_Endpoints_](#endpoints).
+
+![](../images/notifications.png)
+
+## Endpoints
+
+Notifications are sent to _endpoints_ via HTTP requests. Each configured
+endpoint has isolated queues, retry configuration and http targets within each
+instance of a registry. When an action happens within the registry, it is
+converted into an event which is dropped into an in-memory queue. When the
+event reaches the end of the queue, an http request is made to the endpoint
+until the request succeeds. The events are sent serially to each endpoint, but
+order is not guaranteed.
+
+## Configuration
+
+To set up a registry instance to send notifications to endpoints, one must add
+them to the configuration. A simple example follows:
+
+    notifications:
+      endpoints:
+        - name: alistener
+          url: https://mylistener.example.com/event
+          headers:
+            Authorization: [Bearer <an example token>]
+          timeout: 500ms
+          threshold: 5
+          backoff: 1s
+
+The above would configure the registry with an endpoint to send events to
+`https://mylistener.example.com/event`, with the header "Authorization: Bearer
+<an example token>". The request would time out after 500 milliseconds. If
+5 failures happen consecutively, the registry will back off for 1 second before
+trying again.
+
+For details on the fields, please see the [configuration documentation](configuration.md#notifications).
+
+A properly configured endpoint should lead to a log message from the registry
+upon startup:
+
+```
+INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer <an example token>]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry
+```
+
+## Events
+
+Events have a well-defined JSON structure and are sent as the body of
+notification requests. One or more events are sent in a structure called an
+envelope. Each event has a unique id that can be used to de-duplicate incoming
+requests, if required. Along with that, an _action_ is provided with a
+_target_, identifying the object mutated during the event.
+
+The fields available in an event are described in detail in the
+[godoc](http://godoc.org/github.com/docker/distribution/notifications#Event).
+
+**TODO:** Let's break out the fields here rather than rely on the godoc.
+
+The following is an example of a JSON event, sent in response to the push of a
+manifest:
+
+```json
+{
+   "id": "asdf-asdf-asdf-asdf-0",
+   "timestamp": "2006-01-02T15:04:05Z",
+   "action": "push",
+   "target": {
+      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+      "size": 1,
+      "digest": "sha256:0123456789abcdef0",
+      "length": 1,
+      "repository": "library/test",
+      "url": "http://example.com/v2/library/test/manifests/latest"
+   },
+   "request": {
+      "id": "asdfasdf",
+      "addr": "client.local",
+      "host": "registrycluster.local",
+      "method": "PUT",
+      "useragent": "test/0.1"
+   },
+   "actor": {
+      "name": "test-actor"
+   },
+   "source": {
+      "addr": "hostname.local:port"
+   }
+}
+```
+
+> __NOTE(stevvooe):__ As of version 2.1, the `length` field for event targets
+> is being deprecated for the `size` field, bringing the target in line with
+> common nomenclature. Both will continue to be set for the foreseeable
+> future. Newer code should favor `size` but accept either.
+
+## Envelope
+
+The envelope contains one or more events, with the following json structure:
+
+```json
+{
+   "events": [ ... ]
+}
+```
+
+While events may be sent in the same envelope, the set of events within that
+envelope has no implied relationship. For example, the registry may choose to
+group unrelated events and send them in the same envelope to reduce the total
+number of requests.
+
+The full package has the mediatype
+"application/vnd.docker.distribution.events.v1+json", which will be set on the
+request coming to an endpoint.
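+
+On the receiving side, an endpoint only has to accept this media type and
+acknowledge delivery. The following Go sketch is not part of the original
+document: the `/callback` path and port 5003 are chosen only to match the
+monitoring example further below, and a production listener would also verify
+the `Authorization` header and `Content-Type` before trusting the payload:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+)
+
+// envelope mirrors the notification envelope: a JSON object holding an
+// "events" array. Loosely-typed maps keep this sketch independent of the
+// exact event schema described above.
+type envelope struct {
+	Events []map[string]interface{} `json:"events"`
+}
+
+// callback accepts envelopes posted by the registry. Any 2xx (or 3xx)
+// response tells the registry the envelope was delivered.
+func callback(w http.ResponseWriter, r *http.Request) {
+	var env envelope
+	if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
+		http.Error(w, "malformed envelope", http.StatusBadRequest)
+		return
+	}
+	for _, event := range env.Events {
+		log.Printf("event %v: action=%v", event["id"], event["action"])
+	}
+	w.WriteHeader(http.StatusOK)
+}
+
+func main() {
+	http.HandleFunc("/callback", callback)
+	log.Fatal(http.ListenAndServe(":5003", nil))
+}
+```
+
+Run alongside a registry, this is enough to watch pushes and pulls arrive as
+they happen.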
+
+An example of a full request to an endpoint may look as follows:
+
+```
+POST /callback HTTP/1.1
+Host: mylistener.example.com
+Authorization: Bearer <an example token>
+Content-Type: application/vnd.docker.distribution.events.v1+json
+
+{
+   "events": [
+      {
+         "id": "asdf-asdf-asdf-asdf-0",
+         "timestamp": "2006-01-02T15:04:05Z",
+         "action": "push",
+         "target": {
+            "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+            "length": 1,
+            "digest": "sha256:0123456789abcdef0",
+            "repository": "library/test",
+            "url": "http://example.com/v2/library/test/manifests/latest"
+         },
+         "request": {
+            "id": "asdfasdf",
+            "addr": "client.local",
+            "host": "registrycluster.local",
+            "method": "PUT",
+            "useragent": "test/0.1"
+         },
+         "actor": {
+            "name": "test-actor"
+         },
+         "source": {
+            "addr": "hostname.local:port"
+         }
+      },
+      {
+         "id": "asdf-asdf-asdf-asdf-1",
+         "timestamp": "2006-01-02T15:04:05Z",
+         "action": "push",
+         "target": {
+            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+            "length": 2,
+            "digest": "tarsum.v2+sha256:0123456789abcdef1",
+            "repository": "library/test",
+            "url": "http://example.com/v2/library/test/manifests/latest"
+         },
+         "request": {
+            "id": "asdfasdf",
+            "addr": "client.local",
+            "host": "registrycluster.local",
+            "method": "PUT",
+            "useragent": "test/0.1"
+         },
+         "actor": {
+            "name": "test-actor"
+         },
+         "source": {
+            "addr": "hostname.local:port"
+         }
+      },
+      {
+         "id": "asdf-asdf-asdf-asdf-2",
+         "timestamp": "2006-01-02T15:04:05Z",
+         "action": "push",
+         "target": {
+            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+            "length": 3,
+            "digest": "tarsum.v2+sha256:0123456789abcdef2",
+            "repository": "library/test",
+            "url": "http://example.com/v2/library/test/manifests/latest"
+         },
+         "request": {
+            "id": "asdfasdf",
+            "addr": "client.local",
+            "host": "registrycluster.local",
+            "method": "PUT",
+            "useragent": "test/0.1"
+         },
+         "actor": {
+            "name": "test-actor"
+         },
+         "source": {
+            "addr": "hostname.local:port"
+         }
+      }
+   ]
+}
+```
+
+## Responses
+
+The registry is fairly accepting of the response codes from endpoints. If an
+endpoint responds with any 2xx or 3xx response code (after following
+redirects), the message will be considered delivered and discarded.
+
+In turn, it is recommended that endpoints be accepting of incoming requests,
+as well. While the format of event envelopes is standardized by media type,
+any "pickiness" about validation may cause the queue to back up on the
+registry.
+
+## Monitoring
+
+The state of the endpoints is reported via the debug/vars http interface,
+usually configured to "http://localhost:5001/debug/vars". Information such as
+configuration and metrics are available by endpoint.
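+
+One straightforward way to inspect this data is to query the debug endpoint
+directly; a sketch, assuming the debug server listens on `localhost:5001` as
+in the sample `config.yml` included later in this patch:
+
+```
+curl -s http://localhost:5001/debug/vars
+```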
+
+The following provides an example of a few endpoints that have experienced
+several failures and have since recovered:
+
+```json
+"notifications":{
+   "endpoints":[
+      {
+         "name":"local-5003",
+         "url":"http://localhost:5003/callback",
+         "Headers":{
+            "Authorization":[
+               "Bearer \u003can example token\u003e"
+            ]
+         },
+         "Timeout":1000000000,
+         "Threshold":10,
+         "Backoff":1000000000,
+         "Metrics":{
+            "Pending":76,
+            "Events":76,
+            "Successes":0,
+            "Failures":0,
+            "Errors":46,
+            "Statuses":{
+            }
+         }
+      },
+      {
+         "name":"local-8083",
+         "url":"http://localhost:8083/callback",
+         "Headers":null,
+         "Timeout":1000000000,
+         "Threshold":10,
+         "Backoff":1000000000,
+         "Metrics":{
+            "Pending":0,
+            "Events":76,
+            "Successes":76,
+            "Failures":0,
+            "Errors":28,
+            "Statuses":{
+               "202 Accepted":76
+            }
+         }
+      }
+   ]
+}
+```
+
+If using notifications as part of a larger application, it is _critical_ to
+monitor the size ("Pending" above) of the endpoint queues. If failures or
+queue sizes are increasing, it can indicate a larger problem.
+
+The logs are also a valuable resource for monitoring problems. A failing
+endpoint will lead to messages similar to the following:
+
+```
+ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying
+WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off
+```
+
+The above indicates that several errors have led to a backoff and the registry
+will wait before retrying.
+
+## Considerations
+
+Currently, the queues are in-memory, so endpoints should be _reasonably
+reliable_. They are designed to make a best effort to send the messages, but
+if an instance is lost, messages may be dropped. If an endpoint goes down,
+care should be taken to ensure that the registry instance is not terminated
+before the endpoint comes back up, or messages will be lost.
+
+This can be mitigated by running endpoints in close proximity to the registry
+instances. One could run an endpoint that pages to disk and then forwards a
+request to provide better durability.
+
+The notification system is designed around a series of interchangeable _sinks_
+which can be wired up to achieve interesting behavior. If this system doesn't
+provide acceptable guarantees, adding a transactional `Sink` to the registry
+is a possibility, although it may have an effect on request service time.
+Please see the
+[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink)
+for more information.
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md
new file mode 100644
index 00000000..c924d457
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md
@@ -0,0 +1,62 @@
+
+# OS X Setup Guide
+
+This guide will walk you through running the new Go-based [Docker registry](https://github.com/docker/distribution) on your local OS X machine.
+
+## Check out the Docker Distribution source tree
+
+```
+mkdir -p $GOPATH/src/github.com/docker
+git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
+cd $GOPATH/src/github.com/docker/distribution
+```
+
+## Build the registry binary
+
+```
+GOPATH=$(pwd)/Godeps/_workspace:$GOPATH make binaries
+sudo cp bin/registry /usr/local/libexec/registry
+```
+
+## Setup
+
+Copy the registry configuration file into place:
+
+```
+mkdir /Users/Shared/Registry
+cp docs/osx/config.yml /Users/Shared/Registry/config.yml
+```
+
+## Running the Docker Registry under launchd
+
+Copy the Docker registry plist into place:
+
+```
+plutil -lint docs/osx/com.docker.registry.plist
+cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/
+chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist
+```
+
+Start the Docker registry:
+
+```
+launchctl load ~/Library/LaunchAgents/com.docker.registry.plist
+```
+
+### Restarting the docker registry service
+
+```
+launchctl stop com.docker.registry
+launchctl start com.docker.registry
+```
+
+### Unloading the docker registry service
+
+```
+launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist
+```
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist
new file mode 100644
index 00000000..0982349f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>Label</key>
+	<string>com.docker.registry</string>
+	<key>KeepAlive</key>
+	<true/>
+	<key>StandardErrorPath</key>
+	<string>/Users/Shared/Registry/registry.log</string>
+	<key>StandardOutPath</key>
+	<string>/Users/Shared/Registry/registry.log</string>
+	<key>Program</key>
+	<string>/usr/local/libexec/registry</string>
+	<key>ProgramArguments</key>
+	<array>
+		<string>/usr/local/libexec/registry</string>
+		<string>/Users/Shared/Registry/config.yml</string>
+	</array>
+	<key>Sockets</key>
+	<dict>
+		<key>http-listen-address</key>
+		<dict>
+			<key>SockServiceName</key>
+			<string>5000</string>
+			<key>SockType</key>
+			<string>dgram</string>
+			<key>SockFamily</key>
+			<string>IPv4</string>
+		</dict>
+		<key>http-debug-address</key>
+		<dict>
+			<key>SockServiceName</key>
+			<string>5001</string>
+			<key>SockType</key>
+			<string>dgram</string>
+			<key>SockFamily</key>
+			<string>IPv4</string>
+		</dict>
+	</dict>
+</dict>
+</plist>
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml
new file mode 100644
index 00000000..7c19e5f0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml
@@ -0,0 +1,16 @@
+version: 0.1
+log:
+  level: info
+  fields:
+    service: registry
+    environment: macbook-air
+storage:
+  cache:
+    layerinfo: inmemory
+  filesystem:
+    rootdirectory: /Users/Shared/Registry
+http:
+  addr: 0.0.0.0:5000
+  secret: mytokensecret
+  debug:
+    addr: localhost:5001
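
Once the service is loaded, it can help to confirm that the registry is answering on the configured port. A minimal Go sketch, assuming the default `localhost:5000` address from `config.yml` above; it exercises the version check endpoint described later in the API specification:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// A quick check that the registry started by launchd is serving the
// V2 API: GET /v2/ should return 200 OK and advertise the API version
// in the Docker-Distribution-API-Version header.
func main() {
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
	fmt.Println("api version:", resp.Header.Get("Docker-Distribution-API-Version"))
}
```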
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md
new file mode 100644
index 00000000..9b56b6c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md
@@ -0,0 +1,3450 @@
+
+# Docker Registry HTTP API V2
+
+## Introduction
+
+The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
+images to the docker engine. It interacts with instances of the docker
+registry, which is a service to manage information about docker images and
+enable their distribution. The specification covers the operation of version 2
+of this API, known as _Docker Registry HTTP API V2_.
+
+While the V1 registry protocol is usable, there are several problems with the
+architecture that have led to this new version. The main driver of this
+specification is a set of changes to the docker image format, covered in
+docker/docker#8093. The new, self-contained image manifest simplifies image
+definition and improves security. This specification will build on that work,
+leveraging new properties of the manifest format to improve performance,
+reduce bandwidth usage and decrease the likelihood of backend corruption.
+
+For relevant details and history leading up to this specification, please see
+the following issues:
+
+- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
+- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
+- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
+
+### Scope
+
+This specification covers the URL layout and protocols of the interaction
+between docker registry and docker core. This will affect the docker core
+registry API and the rewrite of docker-registry. Docker registry
+implementations may implement other API endpoints, but they are not covered by
+this specification.
+
+This includes the following features:
+
+- Namespace-oriented URI Layout
+- PUSH/PULL registry server for V2 image manifest format
+- Resumable layer PUSH support
+- V2 Client library implementation
+
+While authentication and authorization support will influence this
+specification, details of the protocol will be left to a future specification.
+Relevant header definitions and error codes are present to provide an
+indication of what a client may encounter.
+
+#### Future
+
+There are features that have been discussed during the process of cutting this
+specification. The following is an incomplete list:
+
+- Immutable image references
+- Multiple architecture support
+- Migration from v2compatibility representation
+
+These may represent features that are either out of the scope of this
+specification, the purview of another specification or have been deferred to a
+future version.
+
+### Use Cases
+
+For the most part, the use cases of the former registry API apply to the new
+version. Differentiating use cases are covered below.
+
+#### Image Verification
+
+A docker engine instance would like to run a verified image named
+"library/ubuntu", with the tag "latest". The engine contacts the registry,
+requesting the manifest for "library/ubuntu:latest". An untrusted registry
+returns a manifest. Before proceeding to download the individual layers, the
+engine verifies the manifest's signature, ensuring that the content was
+produced from a trusted source and no tampering has occurred. After each layer
+is downloaded, the engine verifies the digest of the layer, ensuring that the
+content matches that specified by the manifest.
+
+#### Resumable Push
+
+Company X's build servers lose connectivity to docker registry before
+completing an image layer transfer. After connectivity returns, the build
+server attempts to re-upload the image. The registry notifies the build server
+that the upload has already been partially attempted. The build server
+responds by only sending the remaining data to complete the image file.
+
+#### Resumable Pull
+
+Company X is having more connectivity problems but this time in their
+deployment datacenter. When downloading an image, the connection is
+interrupted before completion. The client keeps the partial data and uses http
+`Range` requests to avoid downloading repeated data.
+
+#### Layer Upload De-duplication
+
+Company Y's build system creates two identical docker layers from build
+processes A and B.
+Build process A completes uploading the layer before B. When process B
+attempts to upload the layer, the registry indicates that it's not necessary
+because the layer is already known.
+
+If process A and B upload the same layer at the same time, both operations
+will proceed and the first to complete will be stored in the registry (Note:
+we may modify this to prevent dogpile with some locking mechanism).
+
+### Changes
+
+The V2 specification has been written to work as a living document, specifying
+only what is certain and leaving what is not specified open or to future
+changes. Only non-conflicting additions should be made to the API and accepted
+changes should avoid preventing future changes from happening.
+
+This section should be updated when changes are made to the specification,
+indicating what is different. Optionally, we may start marking parts of the
+specification to correspond with the versions enumerated here.
+
+Each set of changes is given a letter corresponding to a set of modifications
+that were applied to the baseline specification. These are merely for
+reference and shouldn't be used outside the specification other than to
+identify a set of modifications.
+
+<dl>
+  <dt>f</dt>
+  <dd>
+    <ul>
+      <li>Specify the delete API for layers and manifests.</li>
+    </ul>
+  </dd>
+
+  <dt>e</dt>
+  <dd>
+    <ul>
+      <li>Added support for listing registry contents.</li>
+      <li>Added pagination to tags API.</li>
+      <li>Added common approach to support pagination.</li>
+    </ul>
+  </dd>
+
+  <dt>d</dt>
+  <dd>
+    <ul>
+      <li>Allow repository name components to be one character.</li>
+      <li>Clarified that single component names are allowed.</li>
+    </ul>
+  </dd>
+
+  <dt>c</dt>
+  <dd>
+    <ul>
+      <li>Added section covering digest format.</li>
+      <li>Added more clarification that manifest cannot be deleted by tag.</li>
+    </ul>
+  </dd>
+
+  <dt>b</dt>
+  <dd>
+    <ul>
+      <li>Added capability of doing streaming upload to PATCH blob upload.</li>
+      <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+      <li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
+    </ul>
+  </dd>
+
+  <dt>a</dt>
+  <dd>
+    <ul>
+      <li>Added support for immutable manifest references in manifest endpoints.</li>
+      <li>Deleting a manifest by tag has been deprecated.</li>
+      <li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
+      <li>Added error code for unsupported operations.</li>
+    </ul>
+  </dd>
+</dl>
+
+## Overview
+
+This section covers client flows and details of the API endpoints. The URI
+layout of the new API is structured to support a rich authentication and
+authorization model by leveraging namespaces. All endpoints will be prefixed
+by the API version and the repository name:
+
+    /v2/<name>/
+
+For example, for an API endpoint that works with the `library/ubuntu`
+repository, the URI prefix will be:
+
+    /v2/library/ubuntu/
+
+This scheme provides rich access control over various operations and methods
+using the URI prefix and http methods that can be controlled in a variety of
+ways.
+
+Classically, repository names have always been two path components where each
+path component is less than 30 characters. The V2 registry API does not
+enforce this. The rules for a repository name are as follows:
+
+1. A repository name is broken up into _path components_. A component of a
+   repository name must consist of at least one lowercase, alpha-numeric
+   character, optionally separated by periods, dashes or underscores. More
+   strictly, it must match the regular expression
+   `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
+2. If a repository name has two or more path components, they must be
+   separated by a forward slash ("/").
+3. The total length of a repository name, including slashes, must be less
+   than 256 characters.
+
+These name requirements _only_ apply to the registry API; the registry should
+accept a superset of what is supported by other docker ecosystem components.
+
+All endpoints should support aggressive http caching, compression and range
+headers, where appropriate. The new API attempts to leverage HTTP semantics
+where possible but may break from standards to implement targeted features.
+
+For detail on individual endpoints, please see the [_Detail_](#detail)
+section.
+
+### Errors
+
+Actionable failure conditions, covered in detail in their relevant sections,
+are reported as part of 4xx responses, in a json response body. One or more
+errors will be returned in the following format:
+
+    {
+        "errors": [{
+                "code": <error code>,
+                "message": <error message>,
+                "detail": <arbitrary json>
+            },
+            ...
+        ]
+    }
+
+The `code` field will be a unique identifier, all caps with underscores by
+convention. The `message` field will be a human readable string. The optional
+`detail` field may contain arbitrary json data providing information the
+client can use to resolve the issue.
+
+While the client can take action on certain error codes, the registry may add
+new error codes over time. All client implementations should treat unknown
+error codes as `UNKNOWN`, allowing future error codes to be added without
+breaking API compatibility. For the purposes of the specification error codes
+will only be added and never removed.
+
+For a complete account of all error codes, please see the _Detail_ section.
+
+### API Version Check
+
+A minimal endpoint, mounted at `/v2/`, will provide version support
+information based on its response statuses. The request format is as follows:
+
+    GET /v2/
+
+If a `200 OK` response is returned, the registry implements the V2(.1)
+registry API and the client may proceed safely with other V2 operations.
+Optionally, the response may contain information about the supported paths in
+the response body. The client should be prepared to ignore this data.
+
+If a `401 Unauthorized` response is returned, the client should take action
+based on the contents of the "WWW-Authenticate" header and try the endpoint
+again.
Depending on access control setup, the client may still have to +authenticate against different resources, even if this check succeeds. + +If `404 Not Found` response status, or other unexpected status, is returned, +the client should proceed with the assumption that the registry does not +implement V2 of the API. + +When a `200 OK` or `401 Unauthorized` response is returned, the +"Docker-Distribution-API-Version" header should be set to "registry/2.0". +Clients may require this header value to determine if the endpoint serves this +API. When this header is omitted, clients may fallback to an older API version. + +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: + + digest := algorithm ":" hex + algorithm := /[A-Fa-f0-9_+.-]+/ + hex := /[A-Fa-f0-9]+/ + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | +tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | + +> __NOTE:__ While we show an example of using a `tarsum` digest, the security +> of tarsum has not been verified. It is recommended that most implementations +> use sha256 for interoperability. + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. + +Let's use a simple example in pseudo-code to demonstrate a digest calculation: + +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring _C_ passed into a function, _SHA256_, that returns a +bytestring B, which is the hash of _C_. _D_ gets the algorithm concatenated +with the hex encoding of _B_. We then define the identifier of _C_ to _ID(C)_ +as equal to _D_. A digest can be verified by independently calculating _D_ and +comparing it with identifier _ID(C)_ + +#### Digest Header + +To provide verification of http content, any response may include a `Docker- +Content-Digest` header. This will include the digest of the target entity +returned in the response. For blobs, this is the entire blob content. 
For manifests, this is the manifest body without the signature content, also
+known as the JWS payload. Note that the commonly used canonicalization for
+digest calculation may be dependent on the mediatype of the content, such as
+with manifests.
+
+The client may choose to ignore the header or may verify it to ensure content
+integrity and transport security. This is most important when fetching by a
+digest. To ensure security, the content should be verified against the digest
+used to fetch the content. At times, the returned digest may differ from that
+used to initiate a request. Such digests are considered to be from different
+_domains_, meaning they have different values for _algorithm_. In such a case,
+the client may choose to verify the digests in both domains or ignore the
+server's digest. To maintain security, the client _must_ always verify the
+content against the _digest_ used to fetch the content.
+
+> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
+> the same digest used to fetch the content to verify it. The header
+> `Docker-Content-Digest` should not be trusted over the "local" digest.
+
+### Pulling An Image
+
+An "image" is a combination of a JSON manifest and individual layer files. The
+process of pulling an image centers around retrieving these two components.
+
+The first step in pulling an image is to retrieve the manifest. For reference,
+the relevant manifest fields for the registry are the following:
+
+ field     | description                                      |
+-----------|--------------------------------------------------|
+name       | The name of the image.                           |
+tag        | The tag for this version of the image.           |
+fsLayers   | A list of layer descriptors (including tarsum)   |
+signature  | A JWS used to verify the manifest content        |
+
+For more information about the manifest format, please see
+[docker/docker#8093](https://github.com/docker/docker/issues/8093).
+
+When the manifest is in hand, the client must verify the signature to ensure
+the names and layers are valid. Once confirmed, the client will then use the
+tarsums to download the individual layers. Layers are stored as blobs in the
+V2 registry API, keyed by their tarsum digest.
+
+#### Pulling an Image Manifest
+
+The image manifest can be fetched with the following url:
+
+```
+GET /v2/<name>/manifests/<reference>
+```
+
+The `name` and `reference` parameters identify the image and are required. The
+reference may include a tag or digest.
+
+A `404 Not Found` response will be returned if the image is unknown to the
+registry. If the image exists and the response is successful, the image
+manifest will be returned, with the following format (see docker/docker#8093
+for details):
+
+    {
+       "name": <name>,
+       "tag": <tag>,
+       "fsLayers": [
+          {
+             "blobSum": <tarsum>
+          },
+          ...
+       ],
+       "history": <v1 images>,
+       "signature": <JWS>
+    }
+
+The client should verify the returned manifest signature for authenticity
+before fetching layers.
+
+#### Pulling a Layer
+
+Layers are stored in the blob portion of the registry, keyed by tarsum digest.
+Pulling a layer is carried out by a standard http request. The URL is as
+follows:
+
+    GET /v2/<name>/blobs/<tarsum>
+
+Access to a layer will be gated by the `name` of the repository but is
+identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an
+opaque field, to be interpreted by the tarsum library.
+
+This endpoint may issue a 307 (302 for <1.7 clients) redirect to another
+service for downloading the layer and clients should be prepared to handle
+redirects.
+
+### Pushing An Image
+
+Pushing an image works in the opposite order as a pull. After assembling the
+image manifest, the client must first push the individual layers. When the
+layers are fully pushed into the registry, the client should upload the signed
+manifest.
+
+#### Pushing a Layer
+
+All layer uploads use two steps to manage the upload process. The first step
+starts the upload in the registry service, returning a url to carry out the
+second step. The second step uses the upload url to transfer the actual data.
+Uploads are started with a POST request in the following format:
+
+```
+POST /v2/<name>/blobs/uploads/
+```
+
+The parameters of this request are the image namespace under which the layer
+will be linked. Responses to this request are covered below.
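
Every step of this push flow is keyed by a digest, so it may help to see the digest calculation from the _Content Digests_ section in real code. A minimal Go sketch of the pseudo-code shown earlier, assuming plain sha256 over the content bytes (no tarsum):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// digest returns a content digest in the algorithm:hex form
// described above, using sha256 as the recommended algorithm.
func digest(content []byte) string {
	sum := sha256.Sum256(content)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(digest([]byte("a small string")))
}
```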
+
+##### Existing Layers
+
+The existence of a layer can be checked via a `HEAD` request to the blob store
+API. The request should be formatted as follows:
+
+```
+HEAD /v2/<name>/blobs/<digest>
+```
+
+If the layer with the tarsum specified in `digest` is available, a 200 OK
+response will be received, with no actual body content (this is according to
+http specification). The response will look as follows:
+
+```
+200 OK
+Content-Length: <length of blob>
+Docker-Content-Digest: <digest>
+```
+
+When this response is received, the client can assume that the layer is
+already available in the registry under the given name and should take no
+further action to upload the layer. Note that the binary digests may differ
+for the existing registry layer, but the tarsums will be guaranteed to match.
+
+##### Uploading the Layer
+
+If the POST request is successful, a `202 Accepted` response will be returned
+with the upload URL in the `Location` header:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The rest of the upload process can be carried out with the returned url,
+called the "Upload URL" from the `Location` header. All responses to the
+upload url, whether sending data or getting status, will be in this format.
+Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
+header is specified, clients should treat it as an opaque url and should never
+try to assemble it. While the `uuid` parameter may be an actual UUID, this
+proposal imposes no constraints on the format and clients should never impose
+any.
+
+If clients need to correlate local upload state with remote upload state, the
+contents of the `Docker-Upload-UUID` header should be used. Such an id can be
+used to key the last used location header when implementing resumable uploads.
+
+##### Upload Progress
+
+The progress and chunk coordination of the upload process will be coordinated
+through the `Range` header. While this is a non-standard use of the `Range`
+header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
+For an upload that just started, for an example with a 1000 byte layer file,
+the `Range` header would be as follows:
+
+```
+Range: bytes=0-0
+```
+
+To get the status of an upload, issue a GET request to the upload URL:
+
+```
+GET /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+```
+
+The response will be similar to the above, except will return 204 status:
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Docker-Upload-UUID: <uuid>
+```
+
+Note that the HTTP `Range` header byte ranges are inclusive and that will be
+honored, even in non-standard use cases.
+
+##### Monolithic Upload
+
+A monolithic upload is simply a chunked upload with a single chunk and may be
+favored by clients that would like to avoid the complexity of chunking. To
+carry out a "monolithic" upload, one can simply put the entire content blob to
+the provided URL:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
+Content-Length: <size of layer>
+Content-Type: application/octet-stream
+
+<Layer Binary Data>
+```
+
+The "digest" parameter must be included with the PUT request. Please see the
+_Completed Upload_ section for details on the parameters and expected
+responses.
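
The following is a hedged sketch of this two-step monolithic flow in Go. The registry address and repository name are hypothetical, the `sha256:` digest form is used rather than tarsum, and the returned upload URL is assumed to carry no pre-existing query parameters; it is treated as opaque otherwise, as the text above requires.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"
	"net/http"
)

// pushBlob performs the two-step "monolithic" upload described above:
// POST to start the upload, then PUT the whole blob to the returned
// Location with the digest parameter appended.
func pushBlob(registry, name, digest string, blob []byte) error {
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("starting upload: %s", resp.Status)
	}

	// Treat the upload URL as opaque; only append the digest parameter.
	loc, err := resp.Location()
	if err != nil {
		return err
	}
	q := loc.Query()
	q.Set("digest", digest)
	loc.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, loc.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	put, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer put.Body.Close()
	if put.StatusCode != http.StatusCreated {
		return fmt.Errorf("completing upload: %s", put.Status)
	}
	return nil
}

func main() {
	blob := []byte("example layer content")
	sum := sha256.Sum256(blob)
	digest := "sha256:" + hex.EncodeToString(sum[:])
	// Registry address and repository name are hypothetical.
	if err := pushBlob("http://localhost:5000", "library/test", digest, blob); err != nil {
		log.Fatal(err)
	}
}
```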
+ +Additionally, the upload can be completed with a single `POST` request to +the uploads endpoint, including the "size" and "digest" parameters: + +``` +POST /v2//blobs/uploads/?digest=[&digest=sha256:] +Content-Length: +Content-Type: application/octet-stream + + +``` + +On the registry service, this should allocate a download, accept and verify +the data and return the same response as the final chunk of an upload. If the +POST request fails collecting the data in any way, the registry should attempt +to return an error response to the client with the `Location` header providing +a place to continue the download. + +The single `POST` method is provided for convenience and most clients should +implement `POST` + `PUT` to support reliable resume of uploads. + +##### Chunked Upload + +To carry out an upload of a chunk, the client can specify a range header and +only include that part of the layer file: + +``` +PATCH /v2//blobs/uploads/ +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +There is no enforcement on layer chunk splits other than that the server must +receive them in order. The server may enforce a minimum chunk size. If the +server cannot accept the chunk, a `416 Requested Range Not Satisfiable` +response will be returned and will include a `Range` header indicating the +current status: + +``` +416 Requested Range Not Satisfiable +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +If this response is received, the client should resume from the "last valid +range" and upload the subsequent chunk. A 416 will be returned under the +following conditions: + +- Invalid Content-Range header format +- Out of order chunk: the range of the next chunk must start immediately after + the "last valid range" from the previous response. + +When a chunk is accepted as part of the upload, a `202 Accepted` response will +be returned, including a `Range` header with the current upload status: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +##### Completed Upload + +For an upload to be considered complete, the client must submit a `PUT` +request on the upload endpoint with a digest parameter. If it is not provided, +the upload will not be considered complete. The format for the final chunk +will be as follows: + +``` +PUT /v2//blob/uploads/?digest=[&digest=sha256:] +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +Optionally, if all chunks have already been uploaded, a `PUT` request with a +`digest` parameter and zero-length body may be sent to complete and validated +the upload. Multiple "digest" parameters may be provided with different +digests. The server may verify none or all of them but _must_ notify the +client if the content is rejected. + +When the last chunk is received and the layer has been validated, the client +will receive a `201 Created` response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. 
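
For comparison, a hedged sketch of the chunked variant described above: each chunk is sent with a PATCH carrying an inclusive `Content-Range`, and a zero-length PUT with the `digest` parameter completes the upload. The chunk size, upload URL, and digest are hypothetical; only absolute `Location` values are followed, and the upload URL is assumed to carry no query string.

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"strings"
)

// pushChunked uploads blob in fixed-size chunks via PATCH, then
// completes the upload with a zero-length PUT carrying the digest,
// per the _Chunked Upload_ and _Completed Upload_ flows above.
func pushChunked(uploadURL, digest string, blob []byte, chunkSize int) error {
	for start := 0; start < len(blob); start += chunkSize {
		end := start + chunkSize
		if end > len(blob) {
			end = len(blob)
		}
		req, err := http.NewRequest(http.MethodPatch, uploadURL, bytes.NewReader(blob[start:end]))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		// Content-Range is inclusive on both ends, per the text above.
		req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end-1))

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusAccepted {
			return fmt.Errorf("chunk %d-%d: %s", start, end-1, resp.Status)
		}
		// Each 202 carries a Location for the next request; this
		// sketch only follows absolute URLs for simplicity.
		if loc := resp.Header.Get("Location"); strings.HasPrefix(loc, "http") {
			uploadURL = loc
		}
	}

	// A zero-length PUT with the digest parameter completes the upload.
	req, err := http.NewRequest(http.MethodPut, uploadURL+"?digest="+digest, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("completing upload: %s", resp.Status)
	}
	return nil
}

func main() {
	// Upload URL and digest are hypothetical placeholders.
	err := pushChunked("http://localhost:5000/v2/library/test/blobs/uploads/some-uuid",
		"sha256:0123456789abcdef", []byte("example layer content"), 8)
	if err != nil {
		log.Fatal(err)
	}
}
```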
+ +###### Digest Parameter + +The "digest" parameter is designed as an opaque parameter to support +verification of a successful transfer. The initial version of the registry API +will support a tarsum digest, in the standard tarsum format. For example, a +HTTP URI parameter might be as follows: + +``` +tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Given this parameter, the registry will verify that the provided content does +result in this tarsum. Optionally, the registry can support other other digest +parameters for non-tarfile content stored as a layer. A regular hash digest +might be specified as follows: + +``` +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Such a parameter would be used to verify that the binary content (as opposed +to the tar content) would be verified at the end of the upload process. + +For the initial version, registry servers are only required to support the +tarsum format. + +##### Canceling an Upload + +An upload can be cancelled by issuing a DELETE request to the upload endpoint. +The format will be as follows: + +``` +DELETE /v2//blobs/uploads/ +``` + +After this request is issued, the upload uuid will no longer be valid and the +registry server will dump all intermediate data. While uploads will time out +if not completed, clients should issue this request if they encounter a fatal +error but still have the ability to issue an http request. + +##### Errors + +If an 502, 503 or 504 error is received, the client should assume that the +download can proceed due to a temporary condition, honoring the appropriate +retry mechanism. Other 5xx errors should be treated as terminal. + +If there is a problem with the upload, a 4xx error will be returned indicating +the problem. After receiving a 4xx response (except 416, as called out above), +the upload will be considered failed and the client should take appropriate +action. + +Note that the upload url will not be available forever. If the upload uuid is +unknown to the registry, a `404 Not Found` response will be returned and the +client must restart the upload process. + +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2//blobs/ + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete images will not be resolvable. + +#### Pushing an Image Manifest + +Once all of the layers for an image are uploaded, the client can upload the +image manifest. An image can be pushed using the following request format: + + PUT /v2//manifests/ + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": , + ... + } + +The `name` and `reference` fields of the response body must match those specified in +the URL. The `reference` field may be a "tag" or a "digest". + +If there is a problem with pushing the manifest, a relevant 4xx response will +be returned with a JSON error message. Please see the _PUT Manifest section +for details on possible error codes that may be returned. + +If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are +returned. 
The `detail` field of the error response will have a `digest` field +identifying the missing blob, which will be a tarsum. An error is returned for +each unknown blob. The response format is as follows: + + { + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... + ] + } + +### Listing Repositories + +Images are stored in collections, known as a _repository_, which is keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. + +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + , + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the presence of a repository only guarantees that it is there but not that it +is _not_ there. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n= +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "repositories": [ + , + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set. To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been recieved. + +> __NOTE:__ In the request template above, note that the brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. 
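
The remaining steps of this flow are spelled out below. As a compact illustration of following `Link` headers until the result set is exhausted, here is a hedged Go sketch; the naive header parsing and the assumption of a path-absolute next URL (as in the example response above) are simplifications, not requirements of the API.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strings"
)

// listAll walks the paginated catalog, following the RFC5988 Link
// header until it is no longer present. The Link value is parsed
// naively here; a production client might use a full Link parser.
func listAll(registry string, n int) ([]string, error) {
	var all []string
	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)
	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}
		var body struct {
			Repositories []string `json:"repositories"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
			resp.Body.Close()
			return nil, err
		}
		resp.Body.Close()
		all = append(all, body.Repositories...)

		next = ""
		if link := resp.Header.Get("Link"); strings.Contains(link, `rel="next"`) {
			// e.g. Link: </v2/_catalog?n=2&last=b>; rel="next"
			if start, end := strings.Index(link, "<"), strings.Index(link, ">"); start >= 0 && end > start {
				u := link[start+1 : end]
				if !strings.HasPrefix(u, "http") {
					u = registry + u // assumes a path-absolute next URL
				}
				next = u
			}
		}
	}
	return all, nil
}

func main() {
	repos, err := listAll("http://localhost:5000", 100)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(repos)
}
```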
+ +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. +The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <?n=2&last=b>; rel="next" +``` + +The client can then issue the request with above value from the `Link` header, +receiving the values _c_ and _d_. Note that n may change on second to last +response or be omitted fully, if the server may so choose. + +### Listing Image Tags + +It may be necessary to list all of the tags under a given repository. The tags +for an image repository can be retrieved with the following request: + + GET /v2//tags/list + +The response will be in the following format: + + 200 OK + Content-Type: application/json + + { + "name": , + "tags": [ + , + ... + ] + } + +For repositories with a large number of tags, this response may be quite +large. If such a response is expected, one should use the pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2//tags/list?n= +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. + +### Deleting an Image + +An image may be deleted from the registry via its `name` and `reference`. A +delete may be issued with the following request format: + + DELETE /v2//manifests/ + +For deletes, `reference` *must* be a digest or the delete will fail. If the +image exists and has been successfully deleted, the following response will be +issued: + + 202 Accepted + Content-Length: None + +If the image had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +## Detail + +> **Note**: This section is still under construction. For the purposes of +> implementation, if any details below differ from the described request flows +> above, the section below should be corrected. When they match, this note +> should be removed. 
+ +The behavior of the endpoints are covered in detail in this section, organized +by route and entity. All aspects of the request and responses are covered, +including headers, parameters and body formats. Examples of requests and their +corresponding responses, with success and failure, are enumerated. + +> **Note**: The sections on endpoint detail are arranged with an example +> request, a description of the request, followed by information about that +> request. + +A list of methods and URIs are covered in the table below: + +|Method|Path|Entity|Description| +|------|----|------|-----------| +| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | +| GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | +| GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | +| PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | +| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. | +| GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| DELETE | `/v2//blobs/` | Blob | Delete the blob identified by `name` and `digest` | +| POST | `/v2//blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | +| GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | +| PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. | +| PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | +| DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | +| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, json list of repositories available in the registry. | + + +The detail for each endpoint is covered in the following sections. + +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| + `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. + `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. + `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. + `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. 
This error may also be returned when a manifest includes an invalid layer digest. + `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. + `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. + `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. + `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. + `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. + `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. + `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. + `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. + `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. + `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. + + + +### Base + +Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization. + + + +#### GET Base + +Check that the endpoint implements Docker Registry API V2. + + + +``` +GET /v2/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| + + + + +###### On Success: OK + +``` +200 OK +``` + +The API implements V2 protocol and is accessible. + + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authorized to access the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + +###### On Failure: Not Found + +``` +404 Not Found +``` + +The registry does not implement the V2 API. + + + + + +### Tags + +Retrieve information about tags. + + + +#### GET Tags + +Fetch the tags under the repository identified by `name`. 
+ + +##### Tags + +``` +GET /v2//tags/list +Host: +Authorization: +``` + +Return all tags for the repository + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have access to the repository. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + +##### Tags Paginated + +``` +GET /v2//tags/list?n=&last= +``` + +Return a portion of the tags for the specified repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`name`|path|Name of the target repository.| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ], +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have access to the repository. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + + + +### Manifest + +Create, update, delete and retrieve manifests. + + + +#### GET Manifest + +Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. + + + +``` +GET /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: OK + +``` +200 OK +Docker-Content-Digest: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + +The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The name or reference was invalid. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have access to the repository. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The named manifest is not known to the registry. 
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | + + + + +#### PUT Manifest + +Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. + + + +``` +PUT /v2//manifests/ +Host: +Authorization: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Content-Digest: +``` + +The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location url of the uploaded manifest.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Manifest + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | +| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | +| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have permission to push to the repository. 
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + +###### On Failure: Missing Layer(s) + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +} +``` + +One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + + + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON error response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + + +#### DELETE Manifest + +Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. + + + +``` +DELETE /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Accepted + +``` +202 Accepted +``` + + + + + + +###### On Failure: Invalid Name or Reference + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` were invalid and the delete was unable to proceed. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." 
+#### DELETE Manifest
+
+Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.
+
+```
+DELETE /v2/<name>/manifests/<reference>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`reference`|path|Tag or digest of the target manifest.|
+
+###### On Success: Accepted
+
+```
+202 Accepted
+```
+
+###### On Failure: Invalid Name or Reference
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The specified `name` or `reference` were invalid and the delete was unable to proceed.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Unknown Manifest
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
+
+
+### Blob
+
+Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.
+
+#### GET Blob
+
+Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.
+
+##### Fetch Blob
+
+```
+GET /v2/<name>/blobs/<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`digest`|path|Digest of desired blob.|
+
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Docker-Content-Digest: <digest>
+Content-Type: application/octet-stream
+
+<blob binary data>
+```
+
+The blob identified by `digest` is available. The blob content will be present in the body of the response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The length of the requested blob content.|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Success: Temporary Redirect
+
+```
+307 Temporary Redirect
+Location: <blob location>
+Docker-Content-Digest: <digest>
+```
+
+The blob identified by `digest` is available at the provided location.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The location where the layer should be accessible.|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The blob, identified by `name` and `digest`, is unknown to the registry.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+##### Fetch Blob Part
+
+```
+GET /v2/<name>/blobs/<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+Range: bytes=<start>-<end>
+```
+
+This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Range`|header|HTTP Range header specifying blob chunk.|
+|`name`|path|Name of the target repository.|
+|`digest`|path|Digest of desired blob.|
+
+###### On Success: Partial Content
+
+```
+206 Partial Content
+Content-Length: <length>
+Content-Range: bytes <start>-<end>/<size>
+Content-Type: application/octet-stream
+
+<blob binary data>
+```
+The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The length of the requested blob chunk.|
+|`Content-Range`|Content range of blob chunk.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The blob, identified by `name` and `digest`, is unknown to the registry.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+###### On Failure: Requested Range Not Satisfiable
+
+```
+416 Requested Range Not Satisfiable
+```
+
+The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.
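+
+As a rough illustration of the range support described above, here is a hypothetical Go sketch (registry host, repository and digest are assumptions) that first probes for `Accept-Ranges: bytes` with a `HEAD` request and then fetches the first kilobyte of the blob.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// Assumed blob URL, reusing the sample digest from this document.
+	url := "https://registry.example.com/v2/library/ubuntu/blobs/" +
+		"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"
+
+	head, err := http.Head(url)
+	if err != nil {
+		panic(err)
+	}
+	head.Body.Close()
+	if head.Header.Get("Accept-Ranges") != "bytes" {
+		fmt.Println("registry does not advertise range support")
+		return
+	}
+
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Range", "bytes=0-1023")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusPartialContent {
+		chunk, _ := io.ReadAll(resp.Body)
+		fmt.Printf("got %d bytes; Content-Range: %s\n",
+			len(chunk), resp.Header.Get("Content-Range"))
+	}
+}
+```
+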
+#### DELETE Blob
+
+Delete the blob identified by `name` and `digest`.
+
+```
+DELETE /v2/<name>/blobs/<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`digest`|path|Digest of desired blob.|
+
+###### On Success: Accepted
+
+```
+202 Accepted
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Zero|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Failure: Invalid Name or Digest
+
+```
+400 Bad Request
+```
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The blob, identified by `name` and `digest`, is unknown to the registry.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+###### On Failure: Method Not Allowed
+
+```
+405 Method Not Allowed
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+Delete is not enabled on the registry.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
+
+
+### Initiate Blob Upload
+
+Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.
+
+#### POST Initiate Blob Upload
+
+Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.
+
+##### Initiate Monolithic Blob Upload
+
+```
+POST /v2/<name>/blobs/uploads/?digest=<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: <length of blob>
+Content-Type: application/octet-stream
+
+<binary data>
+```
+
+Upload a blob identified by the `digest` parameter in a single request.
+This upload will not be resumable unless a recoverable error is returned.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|Length of the blob being uploaded, corresponding to the length of the request body.|
+|`name`|path|Name of the target repository.|
+|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.|
+
+###### On Success: Created
+
+```
+201 Created
+Location: <blob location>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The blob has been created in the registry and is available at the provided location.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The canonical location of the blob for retrieval.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Invalid Name or Digest
+
+```
+400 Bad Request
+```
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to push to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
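+
+The single-request form reduces to one `POST` carrying the blob and its digest. Below is a minimal, hypothetical Go sketch of a monolithic upload; host and repository are assumptions and authentication is omitted.
+
+```go
+package main
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	blob := []byte("example layer content")
+	// Compute the digest of the blob in the "sha256:<hex>" form.
+	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
+
+	uploadURL := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/?digest=" +
+		url.QueryEscape(digest)
+
+	resp, err := http.Post(uploadURL, "application/octet-stream", bytes.NewReader(blob))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusCreated {
+		// 201 Created: the blob is stored and addressable at Location.
+		fmt.Println("blob stored at:", resp.Header.Get("Location"))
+	} else {
+		fmt.Println("upload failed:", resp.Status)
+	}
+}
+```
+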
+##### Initiate Resumable Blob Upload
+
+```
+POST /v2/<name>/blobs/uploads/
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: 0
+```
+
+Initiate a resumable blob upload with an empty request body.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
+|`name`|path|Name of the target repository.|
+
+###### On Success: Accepted
+
+```
+202 Accepted
+Content-Length: 0
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-0
+Docker-Upload-UUID: <uuid>
+```
+
+The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
+|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Invalid Name or Digest
+
+```
+400 Bad Request
+```
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to push to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+
+### Blob Upload
+
+Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.
+
+#### GET Blob Upload
+
+Retrieve the status of the upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.
+
+```
+GET /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+Retrieve the progress of the current upload, as reported by the `Range` header.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+###### On Success: Upload Progress
+
+```
+204 No Content
+Range: 0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The upload is known and in progress. The last received offset is available in the `Range` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Range`|Range indicating the current progress of the upload.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
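+
+Resolving the current offset is a matter of issuing the `GET` above and parsing the `Range` header. The following hypothetical Go sketch (the upload URL is an assumed value previously returned in a `Location` header) shows one way to do it.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// lastOffset parses a "0-<offset>" Range value into the last received offset.
+func lastOffset(r string) (int64, error) {
+	parts := strings.SplitN(r, "-", 2)
+	if len(parts) != 2 {
+		return 0, fmt.Errorf("malformed range %q", r)
+	}
+	return strconv.ParseInt(parts[1], 10, 64)
+}
+
+func main() {
+	// Assumed upload URL; clients must take this from the Location header.
+	uploadURL := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/d0b4f6c2-example-uuid"
+
+	resp, err := http.Get(uploadURL)
+	if err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNoContent {
+		off, err := lastOffset(resp.Header.Get("Range"))
+		if err != nil {
+			panic(err)
+		}
+		fmt.Println("next chunk should start at byte", off+1)
+	}
+}
+```
+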
+#### PATCH Blob Upload
+
+Upload a chunk of data for the specified upload.
+
+##### Stream upload
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Type: application/octet-stream
+
+<binary data>
+```
+
+Upload a stream of data to the specified upload without completing it.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+
+###### On Success: Data Accepted
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
+|`Range`|Range indicating the current progress of the upload.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to push to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+##### Chunked upload
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Range: <start of range>-<end of range, inclusive>
+Content-Length: <length of chunk>
+Content-Type: application/octet-stream
+
+<binary chunk>
+```
+
+Upload a chunk of data to the specified upload without completing the upload. The data will be uploaded to the specified Content Range.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must be the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.|
+|`Content-Length`|header|Length of the chunk being uploaded, corresponding to the length of the request body.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+
+###### On Success: Chunk Accepted
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
+|`Range`|Range indicating the current progress of the upload.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to push to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+###### On Failure: Requested Range Not Satisfiable
+
+```
+416 Requested Range Not Satisfiable
+```
+
+The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.
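+
+Because the start of each `Content-Range` must be the previously acknowledged end offset plus one, clients typically compute the header from the running offset. The hypothetical Go sketch below (upload URL assumed) sends two adjacent chunks under that rule.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+// uploadChunk PATCHes one chunk starting at offset start and returns the new
+// end offset. Content-Length is set automatically from the reader.
+func uploadChunk(uploadURL string, start int64, chunk []byte) (int64, error) {
+	end := start + int64(len(chunk)) - 1
+	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(chunk))
+	if err != nil {
+		return 0, err
+	}
+	req.Header.Set("Content-Type", "application/octet-stream")
+	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return 0, err
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusAccepted {
+		return 0, fmt.Errorf("chunk rejected: %s", resp.Status)
+	}
+	return end, nil
+}
+
+func main() {
+	// Assumed upload URL from a prior POST.
+	uploadURL := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/d0b4f6c2-example-uuid"
+	data := []byte("0123456789abcdef")
+
+	end, err := uploadChunk(uploadURL, 0, data[:8])
+	if err != nil {
+		panic(err)
+	}
+	// Next chunk must begin at the previous end offset plus one.
+	if _, err := uploadChunk(uploadURL, end+1, data[8:]); err != nil {
+		panic(err)
+	}
+}
+```
+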
+#### PUT Blob Upload
+
+Complete the upload specified by `uuid`, optionally appending the body as the final chunk.
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: <length of data>
+Content-Type: application/octet-stream
+
+<binary data>
+```
+
+Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+|`digest`|query|Digest of uploaded blob.|
+
+###### On Success: Upload Complete
+
+```
+204 No Content
+Location: <blob location>
+Content-Range: <start of range>-<end of range, inclusive>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The canonical location of the blob for retrieval.|
+|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to push to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
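+
+When all chunks are already in place, completing the upload is a zero-length `PUT` carrying only the `digest` parameter. A minimal, hypothetical Go sketch (upload URL and digest are assumed values):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Assumed digest of the fully uploaded blob.
+	digest := "sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"
+	// Assumed upload URL from the latest Location header.
+	uploadURL := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/d0b4f6c2-example-uuid" +
+		"?digest=" + url.QueryEscape(digest)
+
+	req, err := http.NewRequest("PUT", uploadURL, nil) // zero-length body
+	if err != nil {
+		panic(err)
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNoContent || resp.StatusCode == http.StatusCreated {
+		// The registry returns its canonical digest, which may differ.
+		fmt.Println("canonical digest:", resp.Header.Get("Docker-Content-Digest"))
+	}
+}
+```
+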
+#### DELETE Blob Upload
+
+Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.
+
+```
+DELETE /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: 0
+```
+
+Cancel the upload specified by `uuid`.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+
+###### On Success: Upload Deleted
+
+```
+204 No Content
+Content-Length: 0
+```
+
+The upload has been successfully deleted.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+An error was encountered processing the delete. The client may ignore this error.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Unauthorized
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON error response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+
+### Catalog
+
+List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. An application can determine that a repository is available, but cannot conclude from its absence that the repository does not exist.
+
+#### GET Catalog
+
+Retrieve a sorted, json list of repositories available in the registry.
+
+##### Catalog Fetch Complete
+
+```
+GET /v2/_catalog
+```
+
+Request an unabridged list of repositories available.
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "repositories": [
+        <name>,
+        ...
+    ]
+}
+```
+
+Returns the unabridged list of repositories as a json response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+##### Catalog Fetch Paginated
+
+```
+GET /v2/_catalog?n=<integer>&last=<name>
+```
+
+Return the specified portion of repositories.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`n`|query|Limit the number of entries in each response. If not present, all entries will be returned.|
+|`last`|query|Result set will include values lexically after last.|
+
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+Content-Type: application/json; charset=utf-8
+
+{
+    "repositories": [
+        <name>,
+        ...
+    ],
+    "next": "<url>?last=<name>&n=<last n value>"
+}
+```
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+|`Link`|RFC5988 compliant rel="next" with URL to next result set, if available.|
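+
+Walking the full catalog is a loop over `n`/`last`. The following hypothetical Go sketch (registry host assumed) pages through the catalog; for brevity it stops when a short page is returned instead of parsing the `Link` header.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	base := "https://registry.example.com/v2/_catalog"
+	const pageSize = 100
+	last := ""
+
+	for {
+		u := fmt.Sprintf("%s?n=%d", base, pageSize)
+		if last != "" {
+			u += "&last=" + url.QueryEscape(last)
+		}
+
+		resp, err := http.Get(u)
+		if err != nil {
+			panic(err)
+		}
+		var page struct {
+			Repositories []string `json:"repositories"`
+		}
+		err = json.NewDecoder(resp.Body).Decode(&page)
+		resp.Body.Close()
+		if err != nil {
+			panic(err)
+		}
+
+		for _, repo := range page.Repositories {
+			fmt.Println(repo)
+		}
+		if len(page.Repositories) < pageSize {
+			break // result set exhausted; a robust client would check Link
+		}
+		// Results are sorted, so the last entry keys the next page.
+		last = page.Repositories[len(page.Repositories)-1]
+	}
+}
+```
+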
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl
new file mode 100644
index 00000000..cc6bd7c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl
@@ -0,0 +1,1120 @@
+
+# Docker Registry HTTP API V2
+
+## Introduction
+
+The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
+images to the docker engine. It interacts with instances of the docker
+registry, which is a service to manage information about docker images and
+enable their distribution. The specification covers the operation of version 2
+of this API, known as _Docker Registry HTTP API V2_.
+
+While the V1 registry protocol is usable, there are several problems with the
+architecture that have led to this new version. The main driver of this
+specification is a set of changes to the docker image format, covered in
+docker/docker#8093. The new, self-contained image manifest simplifies image
+definition and improves security. This specification will build on that work,
+leveraging new properties of the manifest format to improve performance,
+reduce bandwidth usage and decrease the likelihood of backend corruption.
+
+For relevant details and history leading up to this specification, please see
+the following issues:
+
+- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
+- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
+- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
+
+### Scope
+
+This specification covers the URL layout and protocols of the interaction
+between docker registry and docker core. This will affect the docker core
+registry API and the rewrite of docker-registry. Docker registry
+implementations may implement other API endpoints, but they are not covered by
+this specification.
+
+This includes the following features:
+
+- Namespace-oriented URI Layout
+- PUSH/PULL registry server for V2 image manifest format
+- Resumable layer PUSH support
+- V2 Client library implementation
+
+While authentication and authorization support will influence this
+specification, details of the protocol will be left to a future specification.
+Relevant header definitions and error codes are present to provide an
+indication of what a client may encounter.
+
+#### Future
+
+There are features that have been discussed during the process of cutting this
+specification. The following is an incomplete list:
+
+- Immutable image references
+- Multiple architecture support
+- Migration from v2compatibility representation
+
+These may represent features that are either out of the scope of this
+specification, the purview of another specification or have been deferred to a
+future version.
+
+### Use Cases
+
+For the most part, the use cases of the former registry API apply to the new
+version. Differentiating use cases are covered below.
+
+#### Image Verification
+
+A docker engine instance would like to run a verified image named
+"library/ubuntu", with the tag "latest". The engine contacts the registry,
+requesting the manifest for "library/ubuntu:latest". An untrusted registry
+returns a manifest. Before proceeding to download the individual layers, the
+engine verifies the manifest's signature, ensuring that the content was
+produced from a trusted source and no tampering has occurred. After each layer
+is downloaded, the engine verifies the digest of the layer, ensuring that the
+content matches that specified by the manifest.
+
+#### Resumable Push
+
+Company X's build servers lose connectivity to docker registry before
+completing an image layer transfer. After connectivity returns, the build
+server attempts to re-upload the image. The registry notifies the build server
+that the upload has already been partially attempted. The build server
+responds by only sending the remaining data to complete the image file.
+
+#### Resumable Pull
+
+Company X is having more connectivity problems but this time in their
+deployment datacenter. When downloading an image, the connection is
+interrupted before completion. The client keeps the partial data and uses http
+`Range` requests to avoid downloading repeated data.
+
+#### Layer Upload De-duplication
+
+Company Y's build system creates two identical docker layers from build
+processes A and B. Build process A completes uploading the layer before B.
+When process B attempts to upload the layer, the registry indicates that it's
+not necessary because the layer is already known.
+
+If process A and B upload the same layer at the same time, both operations
+will proceed and the first to complete will be stored in the registry (Note:
+we may modify this to prevent dogpile with some locking mechanism).
+
+### Changes
+
+The V2 specification has been written to work as a living document, specifying
+only what is certain and leaving what is not specified open or to future
+changes. Only non-conflicting additions should be made to the API and accepted
+changes should avoid preventing future changes from happening.
+
+This section should be updated when changes are made to the specification,
+indicating what is different. Optionally, we may start marking parts of the
+specification to correspond with the versions enumerated here.
+
+Each set of changes is given a letter corresponding to a set of modifications
+that were applied to the baseline specification. These are merely for
+reference and shouldn't be used outside the specification other than to
+identify a set of modifications.
+<dl>
+	<dt>f</dt>
+	<dd>
+		<ul>
+			<li>Specify the delete API for layers and manifests.</li>
+		</ul>
+	</dd>
+
+	<dt>e</dt>
+	<dd>
+		<ul>
+			<li>Added support for listing registry contents.</li>
+			<li>Added pagination to tags API.</li>
+			<li>Added common approach to support pagination.</li>
+		</ul>
+	</dd>
+
+	<dt>d</dt>
+	<dd>
+		<ul>
+			<li>Allow repository name components to be one character.</li>
+			<li>Clarified that single component names are allowed.</li>
+		</ul>
+	</dd>
+
+	<dt>c</dt>
+	<dd>
+		<ul>
+			<li>Added section covering digest format.</li>
+			<li>Added more clarification that manifest cannot be deleted by tag.</li>
+		</ul>
+	</dd>
+
+	<dt>b</dt>
+	<dd>
+		<ul>
+			<li>Added capability of doing streaming upload to PATCH blob upload.</li>
+			<li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+			<li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
+		</ul>
+	</dd>
+
+	<dt>a</dt>
+	<dd>
+		<ul>
+			<li>Added support for immutable manifest references in manifest endpoints.</li>
+			<li>Deleting a manifest by tag has been deprecated.</li>
+			<li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
+			<li>Added error code for unsupported operations.</li>
+		</ul>
+	</dd>
+</dl>
+
+## Overview
+
+This section covers client flows and details of the API endpoints. The URI
+layout of the new API is structured to support a rich authentication and
+authorization model by leveraging namespaces. All endpoints will be prefixed
+by the API version and the repository name:
+
+    /v2/<name>/
+
+For example, for an API endpoint that works with the `library/ubuntu`
+repository, the URI prefix will be:
+
+    /v2/library/ubuntu/
+
+This scheme provides rich access control over various operations and methods
+using the URI prefix and http methods that can be controlled in a variety of
+ways.
+
+Classically, repository names have always been two path components where each
+path component is less than 30 characters. The V2 registry API does not
+enforce this. The rules for a repository name are as follows:
+
+1. A repository name is broken up into _path components_. A component of a
+   repository name must consist of at least one lowercase alphanumeric
+   character, optionally separated by periods, dashes or underscores. More
+   strictly, it must match the regular expression
+   `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
+2. If a repository name has two or more path components, they must be
+   separated by a forward slash ("/").
+3. The total length of a repository name, including slashes, must be less
+   than 256 characters.
+
+These name requirements _only_ apply to the registry API and should accept a
+superset of what is supported by other docker ecosystem components.
+
+All endpoints should support aggressive http caching, compression and range
+headers, where appropriate. The new API attempts to leverage HTTP semantics
+where possible but may break from standards to implement targeted features.
+
+For detail on individual endpoints, please see the [_Detail_](#detail)
+section.
+
+### Errors
+
+Actionable failure conditions, covered in detail in their relevant sections,
+are reported as part of 4xx responses, in a json response body. One or more
+errors will be returned in the following format:
+
+    {
+        "errors": [{
+                "code": <error identifier>,
+                "message": <message describing condition>,
+                "detail": <unstructured>
+            },
+            ...
+        ]
+    }
+
+The `code` field will be a unique identifier, all caps with underscores by
+convention. The `message` field will be a human readable string. The optional
+`detail` field may contain arbitrary json data providing information the
+client can use to resolve the issue.
+
+While the client can take action on certain error codes, the registry may add
+new error codes over time. All client implementations should treat unknown
+error codes as `UNKNOWN`, allowing future error codes to be added without
+breaking API compatibility. For the purposes of the specification error codes
+will only be added and never removed.
+
+For a complete account of all error codes, please see the _Detail_ section.
+
+### API Version Check
+
+A minimal endpoint, mounted at `/v2/`, will provide version support
+information based on its response statuses. The request format is as follows:
+
+    GET /v2/
+
+If a `200 OK` response is returned, the registry implements the V2(.1)
+registry API and the client may proceed safely with other V2 operations.
+Optionally, the response may contain information about the supported paths in
+the response body. The client should be prepared to ignore this data.
+
+If a `401 Unauthorized` response is returned, the client should take action
+based on the contents of the "WWW-Authenticate" header and try the endpoint
+again. Depending on access control setup, the client may still have to
+authenticate against different resources, even if this check succeeds.
+
+If a `404 Not Found` response status, or other unexpected status, is returned,
+the client should proceed with the assumption that the registry does not
+implement V2 of the API.
+
+When a `200 OK` or `401 Unauthorized` response is returned, the
+"Docker-Distribution-API-Version" header should be set to "registry/2.0".
+Clients may require this header value to determine if the endpoint serves this
+API. When this header is omitted, clients may fall back to an older API
+version.
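+
+In client code the version check reduces to a single request and a switch on
+the status. The sketch below is a hypothetical Go client (the registry host is
+an assumption) exercising the check described above.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	resp, err := http.Get("https://registry.example.com/v2/")
+	if err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		// V2 supported; the version header should read "registry/2.0".
+		fmt.Println("V2 API:", resp.Header.Get("Docker-Distribution-API-Version"))
+	case http.StatusUnauthorized:
+		// Act on the challenge and retry the endpoint.
+		fmt.Println("challenge:", resp.Header.Get("WWW-Authenticate"))
+	default:
+		// 404 or anything unexpected: assume the registry is not V2.
+		fmt.Println("registry does not implement V2")
+	}
+}
+```
+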
+### Content Digests
+
+This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage).
+The core of this design is the concept of a content addressable identifier. It
+uniquely identifies content by taking a collision-resistant hash of the bytes.
+Such an identifier can be independently calculated and verified by selection
+of a common _algorithm_. If such an identifier can be communicated in a secure
+manner, one can retrieve the content from an insecure source, calculate it
+independently and be certain that the correct content was obtained. Put
+simply, the identifier is a property of the content.
+
+To disambiguate from other concepts, we call this identifier a _digest_. A
+_digest_ is a serialized hash result, consisting of an _algorithm_ and _hex_
+portion. The _algorithm_ identifies the methodology used to calculate the
+digest. The _hex_ portion is the hex-encoded result of the hash.
+
+We define a _digest_ string to match the following grammar:
+
+    digest    := algorithm ":" hex
+    algorithm := /[A-Fa-f0-9_+.-]+/
+    hex       := /[A-Fa-f0-9]+/
+
+Some examples of _digests_ include the following:
+
+digest                                                                            | description                                     |
+----------------------------------------------------------------------------------|------------------------------------------------
+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b           | Common sha256 based digest                      |
+tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests.   |
+
+> __NOTE:__ While we show an example of using a `tarsum` digest, the security
+> of tarsum has not been verified. It is recommended that most implementations
+> use sha256 for interoperability.
+
+While the _algorithm_ does allow one to implement a wide variety of
+algorithms, compliant implementations should use sha256. Heavy processing of
+input before calculating a hash is discouraged to avoid degrading the
+uniqueness of the _digest_ but some canonicalization may be performed to
+ensure consistent identifiers.
+
+Let's use a simple example in pseudo-code to demonstrate a digest calculation:
+
+```
+let C = 'a small string'
+let B = sha256(C)
+let D = 'sha256:' + EncodeHex(B)
+let ID(C) = D
+```
+
+Above, we have bytestring _C_ passed into a function, _SHA256_, that returns a
+bytestring _B_, which is the hash of _C_. _D_ gets the algorithm concatenated
+with the hex encoding of _B_. We then define the identifier of _C_ to _ID(C)_
+as equal to _D_. A digest can be verified by independently calculating _D_ and
+comparing it with identifier _ID(C)_.
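+
+The same calculation is easy to express concretely. Below is a minimal Go
+rendering of the pseudo-code above; it is only an illustration of the digest
+grammar, not part of the specification.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// digest serializes content into the "<algorithm>:<hex>" form described
+// above, using sha256 as the algorithm.
+func digest(content []byte) string {
+	sum := sha256.Sum256(content)        // B = sha256(C)
+	return fmt.Sprintf("sha256:%x", sum) // D = 'sha256:' + EncodeHex(B)
+}
+
+func main() {
+	fmt.Println(digest([]byte("a small string"))) // ID(C)
+}
+```
+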
+#### Digest Header
+
+To provide verification of http content, any response may include a
+`Docker-Content-Digest` header. This will include the digest of the target
+entity returned in the response. For blobs, this is the entire blob content.
+For manifests, this is the manifest body without the signature content, also
+known as the JWS payload. Note that the commonly used canonicalization for
+digest calculation may be dependent on the mediatype of the content, such as
+with manifests.
+
+The client may choose to ignore the header or may verify it to ensure content
+integrity and transport security. This is most important when fetching by a
+digest. To ensure security, the content should be verified against the digest
+used to fetch the content. At times, the returned digest may differ from that
+used to initiate a request. Such digests are considered to be from different
+_domains_, meaning they have different values for _algorithm_. In such a case,
+the client may choose to verify the digests in both domains or ignore the
+server's digest. To maintain security, the client _must_ always verify the
+content against the _digest_ used to fetch the content.
+
+> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
+> the same digest used to fetch the content to verify it. The header
+> `Docker-Content-Digest` should not be trusted over the "local" digest.
+
+### Pulling An Image
+
+An "image" is a combination of a JSON manifest and individual layer files. The
+process of pulling an image centers around retrieving these two components.
+
+The first step in pulling an image is to retrieve the manifest. For reference,
+the relevant manifest fields for the registry are the following:
+
+ field     | description                                     |
+-----------|-------------------------------------------------|
+name       | The name of the image.                          |
+tag        | The tag for this version of the image.          |
+fsLayers   | A list of layer descriptors (including tarsum)  |
+signature  | A JWS used to verify the manifest content       |
+
+For more information about the manifest format, please see
+[docker/docker#8093](https://github.com/docker/docker/issues/8093).
+
+When the manifest is in hand, the client must verify the signature to ensure
+the names and layers are valid. Once confirmed, the client will then use the
+tarsums to download the individual layers. Layers are stored as blobs in the
+V2 registry API, keyed by their tarsum digest.
+
+#### Pulling an Image Manifest
+
+The image manifest can be fetched with the following url:
+
+```
+GET /v2/<name>/manifests/<reference>
+```
+
+The `name` and `reference` parameter identify the image and are required. The
+reference may include a tag or digest.
+
+A `404 Not Found` response will be returned if the image is unknown to the
+registry. If the image exists and the response is successful, the image
+manifest will be returned, with the following format (see docker/docker#8093
+for details):
+
+    {
+        "name": <name>,
+        "tag": <tag>,
+        "fsLayers": [
+            {
+                "blobSum": <tarsum>
+            },
+            ...
+        ],
+        "history": <v1 images>,
+        "signature": <JWS>
+    }
+
+The client should verify the returned manifest signature for authenticity
+before fetching layers.
+
+#### Pulling a Layer
+
+Layers are stored in the blob portion of the registry, keyed by tarsum digest.
+Pulling a layer is carried out by a standard http request. The URL is as
+follows:
+
+    GET /v2/<name>/blobs/<tarsum>
+
+Access to a layer will be gated by the `name` of the repository but is
+identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an
+opaque field, to be interpreted by the tarsum library.
+
+This endpoint may issue a 307 (302 for <HTTP 1.1) redirect to another service
+for downloading the layer and clients should be prepared to handle redirects.
+
+This endpoint should support aggressive HTTP caching for image layers. Support
+for Etags, modification dates and other cache control headers should be
+included. To allow for incremental downloads, `Range` requests should be
+supported, as well.
+
+### Pushing An Image
+
+Pushing an image works in the opposite order as a pull. After assembling the
+image manifest, the client must first push the individual layers. When the
+layers are fully pushed into the registry, the client should upload the signed
+manifest.
+
+The details of each step of the process are covered in the following sections.
+
+#### Pushing a Layer
+
+All layer uploads use two steps to manage the upload process. The first step
+starts the upload in the registry service, returning a url to carry out the
+second step. The second step uses the upload url to transfer the actual data.
+Uploads are started with a POST request which returns a url that can be used
+to push data and check upload status.
+
+##### Starting An Upload
+
+To begin the process, a POST request should be issued in the following format:
+
+```
+POST /v2/<name>/blobs/uploads/
+```
+
+The parameters of this request are the image namespace under which the layer
+will be linked. Responses to this request are covered below.
+
+##### Existing Layers
+
+The existence of a layer can be checked via a `HEAD` request to the blob store
+API. The request should be formatted as follows:
+
+```
+HEAD /v2/<name>/blobs/<digest>
+```
+
+If the layer with the tarsum specified in `digest` is available, a 200 OK
+response will be received, with no actual body content (this is according to
+the http specification). The response will look as follows:
+
+```
+200 OK
+Content-Length: <length of blob>
+Docker-Content-Digest: <digest>
+```
+
+When this response is received, the client can assume that the layer is
+already available in the registry under the given name and should take no
+further action to upload the layer. Note that the binary digests may differ
+for the existing registry layer, but the tarsums will be guaranteed to match.
+
+##### Uploading the Layer
+
+If the POST request is successful, a `202 Accepted` response will be returned
+with the upload URL in the `Location` header:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The rest of the upload process can be carried out with the returned url,
+called the "Upload URL" from the `Location` header. All responses to the
+upload url, whether sending data or getting status, will be in this format.
+Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
+header is specified, clients should treat it as an opaque url and should never
+try to assemble it. While the `uuid` parameter may be an actual UUID, this
+proposal imposes no constraints on the format and clients should never impose
+any.
+
+If clients need to correlate local upload state with remote upload state, the
+contents of the `Docker-Upload-UUID` header should be used. Such an id can be
+used to key the last used location header when implementing resumable uploads.
+
+##### Upload Progress
+
+The progress and chunk coordination of the upload process will be coordinated
+through the `Range` header. While this is a non-standard use of the `Range`
+header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
+For an upload that just started, for example with a 1000 byte layer file,
+the `Range` header would be as follows:
+
+```
+Range: bytes=0-0
+```
+
+To get the status of an upload, issue a GET request to the upload URL:
+
+```
+GET /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+```
+
+The response will be similar to the above, except it will return a 204 status:
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Docker-Upload-UUID: <uuid>
+```
+
+Note that the HTTP `Range` header byte ranges are inclusive and that will be
+honored, even in non-standard use cases.
+
+##### Monolithic Upload
+
+A monolithic upload is simply a chunked upload with a single chunk and may be
+favored by clients that would like to avoid the complexity of chunking. To
+carry out a "monolithic" upload, one can simply put the entire content blob to
+the provided URL:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
+Content-Length: <size of layer>
+Content-Type: application/octet-stream
+
+<Layer Binary Data>
+```
+
+The "digest" parameter must be included with the PUT request. Please see the
+_Completed Upload_ section for details on the parameters and expected
+responses.
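+
+A monolithic upload against the returned upload URL might look like this Go
+sketch, reusing the `uploadURL` captured above; the digest string is an
+assumed input:
+
+```go
+package pushclient
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// putMonolithic completes an upload with a single PUT carrying the whole
+// blob and the required "digest" query parameter.
+func putMonolithic(uploadURL, dgst string, blob []byte) error {
+	u, err := url.Parse(uploadURL)
+	if err != nil {
+		return err
+	}
+	q := u.Query()
+	q.Set("digest", dgst) // e.g. "tarsum.v1+sha256:6c3c62..."
+	u.RawQuery = q.Encode()
+
+	req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/octet-stream")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusCreated {
+		return fmt.Errorf("upload not accepted: %s", resp.Status)
+	}
+	return nil
+}
+```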
+
+Additionally, the upload can be completed with a single `POST` request to
+the uploads endpoint, including the "size" and "digest" parameters:
+
+```
+POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
+Content-Length: <size of layer>
+Content-Type: application/octet-stream
+
+<Layer Binary Data>
+```
+
+On the registry service, this should allocate an upload, accept and verify
+the data and return the same response as the final chunk of an upload. If the
+POST request fails collecting the data in any way, the registry should attempt
+to return an error response to the client with the `Location` header providing
+a place to continue the upload.
+
+The single `POST` method is provided for convenience and most clients should
+implement `POST` + `PUT` to support reliable resume of uploads.
+
+##### Chunked Upload
+
+To carry out an upload of a chunk, the client can specify a range header and
+only include that part of the layer file:
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Content-Length: <size of chunk>
+Content-Range: <start of range>-<end of range>
+Content-Type: application/octet-stream
+
+<Layer Chunk Binary Data>
+```
+
+There is no enforcement on layer chunk splits other than that the server must
+receive them in order. The server may enforce a minimum chunk size. If the
+server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
+response will be returned and will include a `Range` header indicating the
+current status:
+
+```
+416 Requested Range Not Satisfiable
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<last valid range>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+If this response is received, the client should resume from the "last valid
+range" and upload the subsequent chunk. A 416 will be returned under the
+following conditions:
+
+- Invalid Content-Range header format
+- Out of order chunk: the range of the next chunk must start immediately after
+  the "last valid range" from the previous response.
+
+When a chunk is accepted as part of the upload, a `202 Accepted` response will
+be returned, including a `Range` header with the current upload status:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+##### Completed Upload
+
+For an upload to be considered complete, the client must submit a `PUT`
+request on the upload endpoint with a digest parameter. If it is not provided,
+the upload will not be considered complete. The format for the final chunk
+will be as follows:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
+Content-Length: <size of chunk>
+Content-Range: <start of range>-<end of range>
+Content-Type: application/octet-stream
+
+<Last Layer Chunk Binary Data>
+```
+
+Optionally, if all chunks have already been uploaded, a `PUT` request with a
+`digest` parameter and zero-length body may be sent to complete and validate
+the upload. Multiple "digest" parameters may be provided with different
+digests. The server may verify none or all of them but _must_ notify the
+client if the content is rejected.
+
+When the last chunk is received and the layer has been validated, the client
+will receive a `201 Created` response:
+
+```
+201 Created
+Location: /v2/<name>/blobs/<digest>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The `Location` header will contain the registry URL to access the accepted
+layer file. The `Docker-Content-Digest` header returns the canonical digest of
+the uploaded blob which may differ from the provided digest. Most clients may
+ignore the value but if it is used, the client should verify the value against
+the uploaded blob data.
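+
+Putting the chunked flow together, a client loop might look like the following
+Go sketch; the chunk size is arbitrary, and resume handling for the 416 case
+is elided:
+
+```go
+package pushclient
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// putChunked PATCHes the blob in order, one chunk at a time, always
+// following the most recent Location header, then finishes with a
+// zero-length PUT carrying the digest parameter.
+func putChunked(uploadURL, dgst string, blob []byte, chunkSize int) error {
+	for start := 0; start < len(blob); start += chunkSize {
+		end := start + chunkSize
+		if end > len(blob) {
+			end = len(blob)
+		}
+		req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(blob[start:end]))
+		if err != nil {
+			return err
+		}
+		req.Header.Set("Content-Type", "application/octet-stream")
+		// Ranges are inclusive on both ends.
+		req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end-1))
+		resp, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return err
+		}
+		resp.Body.Close()
+		if resp.StatusCode != http.StatusAccepted {
+			return fmt.Errorf("chunk rejected: %s", resp.Status)
+		}
+		uploadURL = resp.Header.Get("Location") // always use the latest value
+	}
+
+	// Complete the upload: zero-length PUT with the digest parameter.
+	u, err := url.Parse(uploadURL)
+	if err != nil {
+		return err
+	}
+	q := u.Query()
+	q.Set("digest", dgst)
+	u.RawQuery = q.Encode()
+	req, err := http.NewRequest("PUT", u.String(), nil)
+	if err != nil {
+		return err
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusCreated {
+		return fmt.Errorf("upload not completed: %s", resp.Status)
+	}
+	return nil
+}
+```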
+
+###### Digest Parameter
+
+The "digest" parameter is designed as an opaque parameter to support
+verification of a successful transfer. The initial version of the registry API
+will support a tarsum digest, in the standard tarsum format. For example, an
+HTTP URI parameter might be as follows:
+
+```
+tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
+```
+
+Given this parameter, the registry will verify that the provided content does
+result in this tarsum. Optionally, the registry can support other digest
+parameters for non-tarfile content stored as a layer. A regular hash digest
+might be specified as follows:
+
+```
+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
+```
+
+Such a parameter would be used to verify the binary content (as opposed
+to the tar content) at the end of the upload process.
+
+For the initial version, registry servers are only required to support the
+tarsum format.
+
+##### Canceling an Upload
+
+An upload can be cancelled by issuing a DELETE request to the upload endpoint.
+The format will be as follows:
+
+```
+DELETE /v2/<name>/blobs/uploads/<uuid>
+```
+
+After this request is issued, the upload uuid will no longer be valid and the
+registry server will dump all intermediate data. While uploads will time out
+if not completed, clients should issue this request if they encounter a fatal
+error but still have the ability to issue an http request.
+
+##### Errors
+
+If a 502, 503 or 504 error is received, the client should assume that the
+failure is due to a temporary condition and that the upload can proceed after
+honoring the appropriate retry mechanism. Other 5xx errors should be treated
+as terminal.
+
+If there is a problem with the upload, a 4xx error will be returned indicating
+the problem. After receiving a 4xx response (except 416, as called out above),
+the upload will be considered failed and the client should take appropriate
+action.
+
+Note that the upload url will not be available forever. If the upload uuid is
+unknown to the registry, a `404 Not Found` response will be returned and the
+client must restart the upload process.
+
+### Deleting a Layer
+
+A layer may be deleted from the registry via its `name` and `digest`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/blobs/<digest>
+
+If the blob exists and has been successfully deleted, the following response
+will be issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the blob had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+If a layer is deleted which is referenced by a manifest in the registry,
+then the affected images will no longer be resolvable.
+
+#### Pushing an Image Manifest
+
+Once all of the layers for an image are uploaded, the client can upload the
+image manifest. An image can be pushed using the following request format:
+
+    PUT /v2/<name>/manifests/<reference>
+
+    {
+       "name": <name>,
+       "tag": <tag>,
+       "fsLayers": [
+          {
+             "blobSum": <tarsum>
+          },
+          ...
+       ],
+       "history": <v1 images>,
+       "signature": <JWS>,
+       ...
+    }
+
+The `name` and `reference` fields of the request body must match those
+specified in the URL. The `reference` field may be a "tag" or a "digest".
+
+If there is a problem with pushing the manifest, a relevant 4xx response will
+be returned with a JSON error message. Please see the _PUT Manifest_ section
+for details on possible error codes that may be returned.
+
+If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
+returned.
The `detail` field of the error response will have a `digest` field
+identifying the missing blob, which will be a tarsum. An error is returned for
+each unknown blob. The response format is as follows:
+
+    {
+       "errors": [{
+             "code": "BLOB_UNKNOWN",
+             "message": "blob unknown to registry",
+             "detail": {
+                 "digest": <tarsum>
+             }
+          },
+          ...
+       ]
+    }
+
+### Listing Repositories
+
+Images are stored in collections, known as _repositories_, which are keyed by
+a `name`, as seen throughout the API specification. A registry instance may
+contain several repositories. The list of available repositories is made
+available through the _catalog_.
+
+The catalog for a given registry can be retrieved with the following request:
+
+```
+GET /v2/_catalog
+```
+
+The response will be in the following format:
+
+```
+200 OK
+Content-Type: application/json
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+Note that the contents of the response are specific to the registry
+implementation. Some registries may opt to provide a full catalog output,
+limit it based on the user's access level or omit upstream results, if
+providing mirroring functionality. Subsequently, the presence of a repository
+in the catalog listing only means that the registry *may* provide access to
+the repository at the time of the request. Conversely, a missing entry does
+*not* mean that the registry does not have the repository. More succinctly,
+the catalog reflects what the registry may serve at the time of the request,
+not a definitive inventory either way.
+
+For registries with a large number of repositories, this response may be quite
+large. If such a response is expected, one should use pagination.
+
+#### Pagination
+
+Paginated catalog results can be retrieved by adding an `n` parameter to the
+request URL, declaring that the response should be limited to `n` results.
+Starting a paginated flow begins as follows:
+
+```
+GET /v2/_catalog?n=<integer>
+```
+
+The above specifies that a catalog response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+The above includes the _first_ `n` entries from the result set. To get the
+_next_ `n` entries, one can create a URL where the argument `last` has the
+value from `repositories[len(repositories)-1]`. If there are indeed more
+results, the URL for the next block is encoded in an
+[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
+relation. The presence of the `Link` header communicates to the client that
+the entire result set has not been returned and another request must be
+issued. If the header is not present, the client can assume that all results
+have been received.
+
+> __NOTE:__ In the request template above, note that the brackets
+> are required. For example, if the url is
+> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
+> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
+> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.
+
+Compliant client implementations should always use the `Link` header
+value when proceeding through results linearly. The client may construct URLs
+to skip forward in the catalog.
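+
+A linear walk of the catalog might look like the following Go sketch;
+`parseNextLink` is a deliberately minimal helper for the header shape shown
+above, not a full RFC5988 parser:
+
+```go
+package catalogclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// listRepositories follows "next" links until the Link header disappears.
+func listRepositories(registryURL string, n int) ([]string, error) {
+	var all []string
+	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registryURL, n)
+	for next != "" {
+		resp, err := http.Get(next)
+		if err != nil {
+			return nil, err
+		}
+		var body struct {
+			Repositories []string `json:"repositories"`
+		}
+		err = json.NewDecoder(resp.Body).Decode(&body)
+		resp.Body.Close()
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, body.Repositories...)
+		next = parseNextLink(registryURL, resp.Header.Get("Link"))
+	}
+	return all, nil
+}
+
+// parseNextLink extracts the target of a `<url>; rel="next"` header,
+// resolving a path-only value against the registry base URL.
+func parseNextLink(base, link string) string {
+	start := strings.Index(link, "<")
+	end := strings.Index(link, ">")
+	if start < 0 || end < start {
+		return ""
+	}
+	u := link[start+1 : end]
+	if strings.HasPrefix(u, "/") {
+		return base + u
+	}
+	return u
+}
+```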
+
+To get the next result set, a client would issue the request as follows, using
+the URL encoded in the described `Link` header:
+
+```
+GET /v2/_catalog?n=<n from the request>&last=<last repository in response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set.
+
+The catalog result set is represented abstractly as a lexically sorted list,
+where the position in that list can be specified by the query term `last`. The
+entries in the response start _after_ the term specified by `last`, up to `n`
+entries.
+
+The behavior of `last` is quite simple when demonstrated with an example. Let
+us say the registry has the following repositories:
+
+```
+a
+b
+c
+d
+```
+
+If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
+The `Link` header returned on the response will have `n` set to 2 and `last`
+set to _b_:
+
+```
+Link: <<url>?n=2&last=b>; rel="next"
+```
+
+The client can then issue the request with the above value from the `Link`
+header, receiving the values _c_ and _d_. Note that `n` may change on the
+second-to-last response or be omitted entirely, if the server so chooses.
+
+### Listing Image Tags
+
+It may be necessary to list all of the tags under a given repository. The tags
+for an image repository can be retrieved with the following request:
+
+    GET /v2/<name>/tags/list
+
+The response will be in the following format:
+
+    200 OK
+    Content-Type: application/json
+
+    {
+        "name": <name>,
+        "tags": [
+            <tag>,
+            ...
+        ]
+    }
+
+For repositories with a large number of tags, this response may be quite
+large. If such a response is expected, one should use pagination.
+
+#### Pagination
+
+Paginated tag results can be retrieved by adding the appropriate parameters to
+the request URL described above. The behavior of tag pagination is identical
+to that specified for catalog pagination. We cover a simple flow to highlight
+any differences.
+
+Starting a paginated flow may begin as follows:
+
+```
+GET /v2/<name>/tags/list?n=<integer>
+```
+
+The above specifies that a tags response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last tag value from response>>; rel="next"
+
+{
+  "name": <name>,
+  "tags": [
+    <tag>,
+    ...
+  ]
+}
+```
+
+To get the next result set, a client would issue the request as follows, using
+the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
+header:
+
+```
+GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set in the response. The behavior of the `last` parameter, the provided
+response result, lexical ordering and encoding of the `Link` header are
+identical to that of catalog pagination.
+
+### Deleting an Image
+
+An image may be deleted from the registry via its `name` and `reference`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/manifests/<reference>
+
+For deletes, `reference` *must* be a digest or the delete will fail. If the
+image exists and has been successfully deleted, the following response will be
+issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the image had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+## Detail
+
+> **Note**: This section is still under construction. For the purposes of
+> implementation, if any details below differ from the described request flows
+> above, the section below should be corrected. When they match, this note
+> should be removed.
+
+The behavior of the endpoints is covered in detail in this section, organized
+by route and entity. All aspects of the requests and responses are covered,
+including headers, parameters and body formats. Examples of requests and their
+corresponding responses, with success and failure, are enumerated.
+
+> **Note**: The sections on endpoint detail are arranged with an example
+> request, a description of the request, followed by information about that
+> request.
+
+A list of methods and URIs is covered in the table below:
+
+|Method|Path|Entity|Description|
+|------|----|------|-----------|
+{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} |
+{{end}}{{end}}
+
+The detail for each endpoint is covered in the following sections.
+
+### Errors
+
+The error codes encountered via the API are enumerated in the following table:
+
+|Code|Message|Description|
+|----|-------|-----------|
+{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}}
+{{end}}
+
+{{range $route := .RouteDescriptors}}
+### {{.Entity}}
+
+{{.Description}}
+
+{{range $method := $route.Methods}}
+
+#### {{.Method}} {{$route.Entity}}
+
+{{.Description}}
+
+{{if .Requests}}{{range .Requests}}{{if .Name}}
+##### {{.Name}}{{end}}
+
+```
+{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}}
+{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
+Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}
+
+{{.Body.Format}}{{end}}
+```
+
+{{.Description}}
+
+{{if or .Headers .PathParameters .QueryParameters}}
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+{{range .Headers}}|`{{.Name}}`|header|{{.Description}}|
+{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}|
+{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}|
+{{end}}{{end}}
+
+{{if .Successes}}
+{{range .Successes}}
+###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}}
+
+```
+{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}}
+{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
+Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}
+
+{{.Body.Format}}{{end}}
+```
+
+{{.Description}}
+{{if .Fields}}The following fields may be returned in the response body:
+
+|Name|Description|
+|----|-----------|
+{{range .Fields}}|`{{.Name}}`|{{.Description}}|
+{{end}}{{end}}{{if .Headers}}
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+{{range .Headers}}|`{{.Name}}`|{{.Description}}|
+{{end}}{{end}}{{end}}{{end}}
+
+{{if .Failures}}
+{{range .Failures}}
+###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}}
+
+```
+{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}}
+{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
+Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}
+
+{{.Body.Format}}{{end}}
+```
+
+{{.Description}}
+{{if .Headers}}
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+{{range .Headers}}|`{{.Name}}`|{{.Description}}|
+{{end}}{{end}}
+
+{{if .ErrorCodes}}
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+{{range $err := .ErrorCodes}}| `{{$err}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} |
+{{end}}
+
+{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}
+
+{{end}}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md
new file mode 100644
index 00000000..a2da9483
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md
@@ -0,0 +1,425 @@
+
+
+
+
+# Docker Registry v2 authentication via central service
+
+Today a Docker Registry can run in standalone mode in which there are no
+authorization checks. While adding your own HTTP authorization requirements in
+a proxy placed between the client and the registry can give you greater access
+control, we'd like a native authorization mechanism that's public key based,
+with access control lists managed separately, and with the ability to have
+fine granularity in access control on a by-key, by-user, by-namespace, and
+by-repository basis. In v1 this can be configured by specifying an
+`index_endpoint` in the registry's config. Clients present tokens generated by
+the index and tokens are validated on-line by the registry with every request.
+This results in a complex authentication and authorization loop that occurs
+with every registry operation. Some people are very familiar with this image:
+
+![index auth](https://docs.docker.com/static_files/docker_pull_chart.png)
+
+The above image outlines the 6-step process in accessing the Official Docker
+Registry.
+
+1. Contact the Docker Hub to know where I should download “samalba/busybox”
+2. Docker Hub replies:
+  a. samalba/busybox is on Registry A
+  b. here are the checksums for samalba/busybox (for all layers)
+  c. token
+3. Contact Registry A to receive the layers for samalba/busybox (all of them to
+   the base image). Registry A is authoritative for “samalba/busybox” but keeps
+   a copy of all inherited layers and serves them all from the same location.
+4. Registry contacts Docker Hub to verify if token/user is allowed to download
+   images.
+5. Docker Hub returns true/false, letting the registry know if it should
+   proceed or error out.
+6. Get the payload for all layers.
+
+The goal of this document is to outline a way to eliminate steps 4 and 5 from
+the above process by using cryptographically signed tokens and no longer
+require the client to authenticate each request with a username and password
+stored locally in plain text.
+
+The new registry workflow is more like this:
+
+![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360)
+
+1. Attempt to begin a push/pull operation with the registry.
+2. If the registry requires authorization it will return a `401 Unauthorized`
+   HTTP response with information on how to authenticate.
+3. The registry client makes a request to the authorization service for a
+   signed JSON Web Token.
+4. The authorization service returns a token.
+5. The client retries the original request with the token embedded in the
+   request header.
+6. The Registry authorizes the client and begins the push/pull session as
+   usual.
+
+## Requirements
+
+- Registry Clients capable of generating key pairs which can be used to
+  authenticate to an authorization server.
+- An authorization server capable of managing user accounts, their public keys,
+  and access controls to their resources hosted by any given service (such as
+  repositories in a Docker Registry).
+- A Docker Registry capable of trusting the authorization server to sign tokens
+  which clients can use for authorization, and of verifying these tokens for
+  single use or for use during a sufficiently short period of time.
+
+## Authorization Server Endpoint Descriptions
+
+This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+
+The described server is meant to serve as a user account and key manager and a
+centralized access control list for resources hosted by other services which
+wish to authenticate and manage authorizations using this service's accounts
+and their public keys.
+
+Such a service could be used by the official docker registry to authenticate
+clients and verify their authorization to docker image repositories.
+
+Docker will need to be updated to interact with an authorization server to get
+an authorization token.
+
+## How to authenticate
+
+Today, registry clients first contact the index to initiate a push or pull.
+For v2, clients should contact the registry first. If the registry server
+requires authentication it will return a `401 Unauthorized` response with a
+`WWW-Authenticate` header detailing how to authenticate to this registry.
+
+For example, say I (username `jlhawn`) am attempting to push an image to the
+repository `samalba/my-app`. For the registry to authorize this, I either need
+`push` access to the `samalba/my-app` repository or `push` access to the whole
+`samalba` namespace in general. The registry will first return this response:
+
+```
+HTTP/1.1 401 Unauthorized
+WWW-Authenticate: Bearer realm="https://auth.docker.com/v2/token/",service="registry.docker.com",scope="repository:samalba/my-app:push"
+```
+
+This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3).
+
+The client will then know to make a `GET` request to the URL
+`https://auth.docker.com/v2/token/` using the `service` and `scope` values from
+the `WWW-Authenticate` header.
+
+## Requesting a Token
+
+#### Query Parameters
+
+<dl>
+    <dt>
+        <code>service</code>
+    </dt>
+    <dd>
+        The name of the service which hosts the resource.
+    </dd>
+    <dt>
+        <code>scope</code>
+    </dt>
+    <dd>
+        The resource in question, formatted as one of the space-delimited
+        entries from the <code>scope</code> parameters from the <code>WWW-Authenticate</code> header
+        shown above. This query parameter should be specified multiple times if
+        there is more than one <code>scope</code> entry from the <code>WWW-Authenticate</code>
+        header. The above example would be specified as:
+        <code>scope=repository:samalba/my-app:push</code>.
+    </dd>
+    <dt>
+        <code>account</code>
+    </dt>
+    <dd>
+        The name of the account which the client is acting as. Optional if it
+        can be inferred from client authentication.
+    </dd>
+</dl>
+
+#### Description
+
+Requests an authorization token for access to a specific resource hosted by a
+specific service provider. Requires the client to authenticate either using a
+TLS client certificate or using basic authentication (or any other kind of
+digest/challenge/response authentication scheme if the client doesn't support
+TLS client certs). If the key in the client certificate is linked to an account
+then the token is issued for that account key. If the key in the certificate is
+linked to multiple accounts then the client must specify the `account` query
+parameter. The returned token is in JWT (JSON Web Token) format, signed using
+the authorization server's private key.
+
+#### Example
+
+For this example, the client makes an HTTP request to the following endpoint
+over TLS using a client certificate with the server being configured to allow a
+non-verified issuer during the handshake (i.e., a self-signed client cert is
+okay).
+
+```
+GET /v2/token/?service=registry.docker.com&scope=repository:samalba/my-app:push&account=jlhawn HTTP/1.1
+Host: auth.docker.com
+```
+
+The server first inspects the client certificate to extract the subject key
+and look up which account it is associated with. The client is now
+authenticated using that account.
+
+The server next searches its access control list for the account's access to
+the repository `samalba/my-app` hosted by the service `registry.docker.com`.
+
+The server will now construct a JSON Web Token to sign and return. A JSON Web
+Token has 3 main parts:
+
+1. Headers
+
+   The header of a JSON Web Token is a standard JOSE header. The "typ" field
+   will be "JWT" and it will also contain the "alg" which identifies the
+   signing algorithm used to produce the signature. It will also usually have
+   a "kid" field, the ID of the key which was used to sign the token.
+
+   Here is an example JOSE Header for a JSON Web Token (formatted with
+   whitespace for readability):
+
+   ```
+   {
+       "typ": "JWT",
+       "alg": "ES256",
+       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
+   }
+   ```
+
+   It specifies that this object is going to be a JSON Web Token signed with
+   the key with the given ID, using the Elliptic Curve signature algorithm
+   with a SHA256 hash.
+
+2. Claim Set
+
+   The Claim Set is a JSON struct containing these standard registered claim
+   name fields:
+
+   <dl>
+       <dt>
+           <code>iss</code> (Issuer)
+       </dt>
+       <dd>
+           The issuer of the token, typically the fqdn of the authorization
+           server.
+       </dd>
+       <dt>
+           <code>sub</code> (Subject)
+       </dt>
+       <dd>
+           The subject of the token; the id of the client which requested it.
+       </dd>
+       <dt>
+           <code>aud</code> (Audience)
+       </dt>
+       <dd>
+           The intended audience of the token; the id of the service which
+           will verify the token to authorize the client/subject.
+       </dd>
+       <dt>
+           <code>exp</code> (Expiration)
+       </dt>
+       <dd>
+           The token should only be considered valid up to this specified date
+           and time.
+       </dd>
+       <dt>
+           <code>nbf</code> (Not Before)
+       </dt>
+       <dd>
+           The token should not be considered valid before this specified date
+           and time.
+       </dd>
+       <dt>
+           <code>iat</code> (Issued At)
+       </dt>
+       <dd>
+           Specifies the date and time which the Authorization server
+           generated this token.
+       </dd>
+       <dt>
+           <code>jti</code> (JWT ID)
+       </dt>
+       <dd>
+           A unique identifier for this token. Can be used by the intended
+           audience to prevent replays of the token.
+       </dd>
+   </dl>
+
+   The Claim Set will also contain a private claim name unique to this
+   authorization server specification:
+
+   <dl>
+       <dt>
+           <code>access</code>
+       </dt>
+       <dd>
+           An array of access entry objects with the following fields:
+
+           <dl>
+               <dt>
+                   <code>type</code>
+               </dt>
+               <dd>
+                   The type of resource hosted by the service.
+               </dd>
+               <dt>
+                   <code>name</code>
+               </dt>
+               <dd>
+                   The name of the resource of the given type hosted by the
+                   service.
+               </dd>
+               <dt>
+                   <code>actions</code>
+               </dt>
+               <dd>
+                   An array of strings which give the actions authorized on
+                   this resource.
+               </dd>
+           </dl>
+       </dd>
+   </dl>
+
+   Here is an example of such a JWT Claim Set (formatted with whitespace for
+   readability):
+
+   ```
+   {
+       "iss": "auth.docker.com",
+       "sub": "jlhawn",
+       "aud": "registry.docker.com",
+       "exp": 1415387315,
+       "nbf": 1415387015,
+       "iat": 1415387015,
+       "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
+       "access": [
+           {
+               "type": "repository",
+               "name": "samalba/my-app",
+               "actions": [
+                   "push"
+               ]
+           }
+       ]
+   }
+   ```
+
+3. Signature
+
+   The authorization server will produce a JOSE header and Claim Set with no
+   extraneous whitespace, i.e., the JOSE Header from above would be
+
+   ```
+   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
+   ```
+
+   and the Claim Set from above would be
+
+   ```
+   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push"]}]}
+   ```
+
+   The utf-8 representations of this JOSE header and Claim Set are then
+   url-safe base64 encoded (sans trailing '=' padding), producing:
+
+   ```
+   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
+   ```
+
+   for the JOSE Header and
+
+   ```
+   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
+   ```
+
+   for the Claim Set. These two are concatenated using a '.' character,
+   yielding the string:
+
+   ```
+   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
+   ```
+
+   This is then used as the payload for the `ES256` signature algorithm
+   specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
+   draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4).
+
+   This example signature will use the following ECDSA key for the server:
+
+   ```
+   {
+       "kty": "EC",
+       "crv": "P-256",
+       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
+       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
+       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
+       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
+   }
+   ```
+
+   A resulting signature of the above payload using this key is:
+
+   ```
+   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
+   ```
+
+   Concatenating all of these together with a `.` character gives the
+   resulting JWT:
+
+   ```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w + ``` + +This can now be placed in an HTTP response and returned to the client to use to +authenticate to the audience service: + + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} +``` + +## Using the signed token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) + +## Verifying the token + +The registry must now verify the token presented by the user by inspecting the +claim set within. The registry will: + +- Ensure that the issuer (`iss` claim) is an authority it trusts. +- Ensure that the registry identifies as the audience (`aud` claim). +- Check that the current time is between the `nbf` and `exp` claim times. +- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has + not been seen before. + - To enforce this, the registry may keep a record of `jti`s it has seen for + up to the `exp` time of the token to prevent token replays. +- Check the `access` claim value and use the identified resources and the list + of actions authorized to determine whether the token grants the required + level of access for the operation the client is attempting to perform. +- Verify that the signature of the token is valid. + +At no point in this process should the registry need to call back to +the authorization server. If anything, it would only need to update a list of +trusted public keys for verifying token signatures or use a separate API +(still to be spec'd) to add/update resource records on the authorization +server. 
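+
+As an illustration only, the claim checks above might look like the following
+Go sketch. It assumes the token signature has already been verified and the
+claim set decoded; the type and field names here are hypothetical, not from
+libtrust or any other library:
+
+```go
+package tokencheck
+
+import (
+	"errors"
+	"time"
+)
+
+// ClaimSet mirrors the JWT claim set fields described above.
+type ClaimSet struct {
+	Issuer     string   `json:"iss"`
+	Subject    string   `json:"sub"`
+	Audience   string   `json:"aud"`
+	Expiration int64    `json:"exp"`
+	NotBefore  int64    `json:"nbf"`
+	IssuedAt   int64    `json:"iat"`
+	JWTID      string   `json:"jti"`
+	Access     []Access `json:"access"`
+}
+
+// Access is one entry of the private "access" claim.
+type Access struct {
+	Type    string   `json:"type"`
+	Name    string   `json:"name"`
+	Actions []string `json:"actions"`
+}
+
+// Verify applies the issuer, audience and validity-window checks. Signature
+// verification, jti replay tracking and access checks would surround this.
+func Verify(c *ClaimSet, trustedIssuer, registryID string, now time.Time) error {
+	if c.Issuer != trustedIssuer {
+		return errors.New("token issued by an untrusted authority")
+	}
+	if c.Audience != registryID {
+		return errors.New("token not intended for this registry")
+	}
+	if t := now.Unix(); t < c.NotBefore || t > c.Expiration {
+		return errors.New("token outside of its validity window")
+	}
+	return nil
+}
+```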
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md new file mode 100644 index 00000000..5cec148f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md @@ -0,0 +1,26 @@ +# Distribution API Implementations + +This is a list of known implementations of the Distribution API spec. + +## [Docker Distribution Registry](https://github.com/docker/distribution) + +Docker distribution is the reference implementation of the distribution API +specification. It aims to fully implement the entire specification. + +### Releases +#### 2.0.1 (_in development_) +Implements API 2.0.1 + +_Known Issues_ + - No resumable push support + - Content ranges ignored + - Blob upload status will always return a starting range of 0 + +#### 2.0.0 +Implements API 2.0.0 + +_Known Issues_ + - No resumable push support + - No PATCH implementation for blob upload + - Content ranges ignored + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md new file mode 100644 index 00000000..a7b1807f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md @@ -0,0 +1,88 @@ + + + + +# Docker Distribution JSON Canonicalization + +To provide consistent content hashing of JSON objects throughout Docker +Distribution APIs, the specification defines a canonical JSON format. Adopting +such a canonicalization also aids in caching JSON responses. + +## Rules + +Compliant JSON should conform to the following rules: + +1. All generated JSON should comply with [RFC + 7159](http://www.ietf.org/rfc/rfc7159.txt). +2. Resulting "JSON text" shall always be encoded in UTF-8. +3. Unless a canonical key order is defined for a particular schema, object + keys shall always appear in lexically sorted order. +4. All whitespace between tokens should be removed. +5. No "trailing commas" are allowed in object or array definitions. + +## Examples + +The following is a simple example of a canonicalized JSON string: + +```json +{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} +``` + +## Reference + +### Other Canonicalizations + +The OLPC project specifies [Canonical +JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in +[TUF](http://theupdateframework.com/), which may be used with other +distribution-related protocols, this alternative format has been proposed in +case the original source changes. Specifications complying with either this +specification or an alternative should explicitly call out the +canonicalization format. Except for key ordering, this specification is mostly +compatible. + +### Go + +In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library +will emit canonical JSON by default. Simply using `json.Marshal` will suffice +in most cases: + +```go +incoming := map[string]interface{}{ + "asdf": 1, + "qwer": []interface{}{}, + "zxcv": []interface{}{ + map[string]interface{}{}, + true, + int(1e9), + "tyui", + }, +} + +canonical, err := json.Marshal(incoming) +if err != nil { + // ... 
handle error
+}
+```
+
+To compact an existing serialized JSON buffer into this canonical form, one
+can use
+[`json.Compact`](http://golang.org/pkg/encoding/json/#Compact),
+which removes all insignificant whitespace:
+
+```go
+incoming := getBytes()
+var canonical bytes.Buffer
+if err := json.Compact(&canonical, incoming); err != nil {
+	// ... handle error
+}
+```
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
new file mode 100644
index 00000000..259e3cf6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
@@ -0,0 +1,153 @@
+# Image Manifest Version 2, Schema 1
+
+This document outlines the format of the V2 image manifest. The image
+manifest described herein was introduced in the Docker daemon in the [v1.3.0
+release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
+It is a provisional manifest to provide compatibility with the [V1 Image
+format](https://github.com/docker/docker/blob/master/image/spec/v1.md), while
+the requirements are defined for the [V2 Schema 2
+image](https://github.com/docker/distribution/pull/62).
+
+
+Image manifests describe the various constituents of a docker image. Image
+manifests can be serialized to JSON format with the following media types:
+
+Manifest Type   | Media Type
+--------------- | -------------
+manifest        | "application/vnd.docker.distribution.manifest.v1+json"
+signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws"
+
+*Note that "application/json" will also be accepted for schema 1.*
+
+References:
+
+ - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015)
+ - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093)
+
+## *Manifest* Field Descriptions
+
+Manifest provides the base accessible fields for working with the V2 image
+format in the registry.
+
+- **`name`** *string*
+
+   name is the name of the image's repository
+
+- **`tag`** *string*
+
+   tag is the tag of the image
+
+- **`architecture`** *string*
+
+   architecture is the host architecture on which this image is intended to
+   run. This is for information purposes and not currently used by the engine.
+
+- **`fsLayers`** *array*
+
+   fsLayers is a list of filesystem layer blob sums contained in this image.
+
+   An fsLayer is a struct consisting of the following fields:
+   - **`blobSum`** *digest.Digest*
+
+      blobSum is the digest of the referenced filesystem image layer. A
+      digest can be a tarsum or sha256 hash.
+
+
+- **`history`** *array*
+
+   history is a list of unstructured historical data for v1 compatibility.
+
+   history is a struct consisting of the following fields:
+   - **`v1Compatibility`** string
+
+      V1Compatibility is the raw V1 compatibility information. This will
+      contain the JSON object describing the V1 of this image.
+
+- **`schemaVersion`** *int*
+
+   SchemaVersion is the image manifest schema that this image follows.
+
+## Signed Manifests
+
+Signed manifests provide an envelope for a signed image manifest. A signed
+manifest consists of an image manifest along with an additional field
+containing the signature of the manifest.
+
+The docker client can verify signed manifests and display a message to the
+user.
+
+### Signing Manifests
+
+Image manifests can be signed in two different ways: with a *libtrust* private
+key or an x509 certificate chain. When signing with an x509 certificate chain,
+the public key of the first element in the chain must be the public key
+corresponding to the signing key.
+
+### Signed Manifest Field Description
+
+Signed manifests include an image manifest and a list of signatures generated
+by *libtrust*. A signature consists of the following fields:
+
+
+- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)*
+
+   A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html)
+
+- **`signature`** *string*
+
+   A signature for the image manifest, signed by a *libtrust* private key
+
+- **`protected`** *string*
+
+   The signed protected header
+
+## Example Manifest
+
+*Example showing the official 'hello-world' image manifest.*
+
+```
+{
+   "name": "hello-world",
+   "tag": "latest",
+   "architecture": "amd64",
+   "fsLayers": [
+      {
+         "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+      },
+      {
+         "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+      },
+      {
+         "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"
+      },
+      {
+         "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+      }
+   ],
+   "history": [
+      {
+         "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+      },
"{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + ], + "schemaVersion": 1, + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", + "kty": "EC", + "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", + "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" + }, + "alg": "ES256" + }, + "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", + "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" + } + ] +} + +``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md new file mode 100644 index 00000000..f994f38a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md @@ -0,0 +1,24 @@ + + + +# Microsoft Azure storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage. + +## Parameters + +The following parameters must be used to authenticate and configure the storage driver (case-sensitive): + +* `accountname`: Name of the Azure Storage Account. +* `accountkey`: Primary or Secondary Key for the Storage Account. +* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. +* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. 
+
+[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
+[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md
new file mode 100644
index 00000000..2dbad8cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md
@@ -0,0 +1,16 @@
+
+
+
+# Filesystem storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.
+
+## Parameters
+
+`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/var/lib/registry`.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md
new file mode 100644
index 00000000..f43e1510
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md
@@ -0,0 +1,18 @@
+
+
+
+# In-memory storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.
+
+**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing.
+
+## Parameters
+
+None
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md
new file mode 100644
index 00000000..748a31da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md
@@ -0,0 +1,31 @@
+
+
+# Aliyun OSS storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage.
+
+## Parameters
+
+* `accesskeyid`: Your access key ID.
+
+* `accesskeysecret`: Your access key secret.
+
+* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, consult the official OSS documentation.
+
+* `endpoint`: (optional) By default, the endpoint should be `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value.
+
+* `internal`: (optional) Whether to use the internal endpoint or the public endpoint for OSS access. The default is false.
+
+* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization).
+
+* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
+
+* `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to false if not specified.
+
+* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS.
+
+* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md
new file mode 100644
index 00000000..4b630e19
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md
@@ -0,0 +1,40 @@
+
+
+
+# Ceph RADOS storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses
+[Ceph RADOS Object Storage][rados] as a storage backend.
+
+## Parameters
+
+The following parameters must be used to configure the storage driver
+(case-sensitive):
+
+* `poolname`: Name of the Ceph pool
+* `username` *optional*: The user to connect as (i.e. admin, not client.admin)
+* `chunksize` *optional*: Size of the written RADOS objects. Default value is
+4MB (4194304).
+
+This driver loads the [Ceph client configuration][rados-config] from the
+following standard paths (the first one found is used):
+
+* `$CEPH_CONF` (environment variable)
+* `/etc/ceph/ceph.conf`
+* `~/.ceph/config`
+* `ceph.conf` (in the current working directory)
+
+## Developing
+
+To include this driver when building Docker Distribution, use the build tag
+`include_rados`. Please see the [building documentation][building] for details.
+
+[rados]: http://ceph.com/docs/master/rados/
+[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/
+[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md
new file mode 100644
index 00000000..8dc3b234
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md
@@ -0,0 +1,34 @@
+
+
+
+# S3 storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.
+
+## Parameters
+
+`accesskey`: Your aws access key.
+
+`secretkey`: Your aws secret key.
+
+**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials.
+
+`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
+
+`bucket`: The name of your s3 bucket where you wish to store objects (needs to already be created prior to driver initialization).
+
+`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
+
+`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
+
+`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false).
+
+`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to s3. The default is 10 MB. Keep in mind that the minimum part size for s3 is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to s3.
+
+`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md
new file mode 100644
index 00000000..372cb6ab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md
@@ -0,0 +1,139 @@
+
+
+
+# OpenStack Swift storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage.
+
+## Parameters
+
+<table>
+  <tr>
+    <td><code>authurl</code></td>
+    <td>URL for obtaining an auth token.</td>
+  </tr>
+  <tr>
+    <td><code>username</code></td>
+    <td>Your OpenStack user name.</td>
+  </tr>
+  <tr>
+    <td><code>password</code></td>
+    <td>Your OpenStack password.</td>
+  </tr>
+  <tr>
+    <td><code>container</code></td>
+    <td>The name of your Swift container where you wish to store objects. An additional container called <code>&lt;container&gt;_segments</code> stores the segment data. The driver creates both the named container and the segments container during its initialization.</td>
+  </tr>
+  <tr>
+    <td><code>tenant</code></td>
+    <td>Optionally, your OpenStack tenant name. You can either use <code>tenant</code> or <code>tenantid</code>.</td>
+  </tr>
+  <tr>
+    <td><code>tenantid</code></td>
+    <td>Optionally, your OpenStack tenant id. You can either use <code>tenant</code> or <code>tenantid</code>.</td>
+  </tr>
+  <tr>
+    <td><code>domain</code></td>
+    <td>Optionally, your OpenStack domain name for the Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.</td>
+  </tr>
+  <tr>
+    <td><code>domainid</code></td>
+    <td>Optionally, your OpenStack domain id for the Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.</td>
+  </tr>
+  <tr>
+    <td><code>insecureskipverify</code></td>
+    <td>Optionally, set <code>insecureskipverify</code> to <code>true</code> to skip TLS verification for your OpenStack provider. The driver uses <code>false</code> by default.</td>
+  </tr>
+  <tr>
+    <td><code>region</code></td>
+    <td>Optionally, specify the OpenStack region name in which you would like to store objects (for example <code>fr</code>).</td>
+  </tr>
+  <tr>
+    <td><code>chunksize</code></td>
+    <td>Optionally, specify the segment size for Dynamic Large Objects uploads (performed by WriteStream) to Swift. The default is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to Swift.</td>
+  </tr>
+  <tr>
+    <td><code>prefix</code></td>
+    <td>Optionally, supply the root directory tree in which to store all registry files. Defaults to the empty string, which is the container's root.</td>
+  </tr>
+</table>
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md
new file mode 100644
index 00000000..b014049c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md
@@ -0,0 +1,61 @@
+
+
+
+# Docker Registry Storage Driver
+
+This document describes the registry storage driver model and implementation, and explains how to contribute new storage drivers.
+
+## Provided Drivers
+
+This storage driver package comes bundled with several drivers:
+
+- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
+- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
+- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
+- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
+- [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool.
+- [swift](storage-drivers/swift.md): A driver storing objects in [OpenStack Swift](http://docs.openstack.org/developer/swift/).
+- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss).
+
+## Storage Driver API
+
+The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers, from the local filesystem to Amazon S3 or other distributed object storage systems.
+
+Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
+
+Storage drivers are intended to be written in Go, providing compile-time
+validation of the `storagedriver.StorageDriver` interface.
+
+## Driver Selection and Configuration
+
+The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) functions in the built-in [database/sql](http://golang.org/pkg/database/sql) package.
+
+Storage driver factories may be registered by name using the
+`factory.Register` method, and then later invoked by calling `factory.Create`
+with a driver name and parameters map. If no such storage driver can be found,
+`factory.Create` will return an `InvalidStorageDriverError`.
+
+## Driver Contribution
+
+### Writing new storage drivers
+To create a valid storage driver, one must implement the
+`storagedriver.StorageDriver` interface and make sure to expose this driver
+via the factory system.
+
+#### Registering
+Storage drivers should call `factory.Register` with their driver name in an `init` function, allowing callers of `factory.Create` to construct instances of this driver without requiring modification of imports throughout the codebase.
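To make the registration side of this concrete, here is a minimal sketch of a driver package; the `mydriver` name and its parameter handling are illustrative, while `factory.Register`, `factory.StorageDriverFactory`, and `factory.Create` are the APIs described above:

```go
package mydriver

import (
	"fmt"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

// myDriverFactory implements factory.StorageDriverFactory for a
// hypothetical "mydriver" backend.
type myDriverFactory struct{}

func (myDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	if _, ok := parameters["rootdirectory"].(string); !ok {
		return nil, fmt.Errorf("mydriver: rootdirectory parameter is required")
	}
	// A real driver would construct and return a type implementing
	// storagedriver.StorageDriver here.
	return nil, fmt.Errorf("mydriver: not implemented")
}

// init runs on import, making the driver available to
// factory.Create("mydriver", parameters) without further import changes.
func init() {
	factory.Register("mydriver", myDriverFactory{})
}
```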
+
+## Testing
+Storage driver test suites are provided in
+`storagedriver/testsuites/testsuites.go` and may be used for any storage
+driver written in Go. Tests can be registered using the `RegisterSuite`
+function, which runs the same set of tests for any registered driver.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/errors.go
new file mode 100644
index 00000000..53def4b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/errors.go
@@ -0,0 +1,82 @@
+package distribution
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
+// ErrRepositoryUnknown is returned if the named repository is not known by
+// the registry.
+type ErrRepositoryUnknown struct {
+	Name string
+}
+
+func (err ErrRepositoryUnknown) Error() string {
+	return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrRepositoryNameInvalid should be used to denote an invalid repository
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrRepositoryNameInvalid struct {
+	Name   string
+	Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+	Name string
+	Tag  string
+}
+
+func (err ErrManifestUnknown) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+	Name     string
+	Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+	return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+	var parts []string
+	for _, err := range errs {
+		parts = append(parts, err.Error())
+	}
+
+	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+	Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
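Because these error values are plain struct types, callers can distinguish them with a type switch rather than by matching message strings. A small, hypothetical consumer sketch (the `describeError` helper and its messages are illustrative, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
)

// describeError branches on the typed errors defined in errors.go.
func describeError(err error) string {
	switch err := err.(type) {
	case distribution.ErrRepositoryUnknown:
		return fmt.Sprintf("no such repository: %s", err.Name)
	case distribution.ErrManifestUnknown:
		return fmt.Sprintf("no manifest %s:%s", err.Name, err.Tag)
	case distribution.ErrManifestVerification:
		// The slice type collects every failure seen during verification.
		return fmt.Sprintf("%d manifest verification error(s)", len(err))
	default:
		return err.Error()
	}
}

func main() {
	fmt.Println(describeError(distribution.ErrRepositoryUnknown{Name: "library/test"}))
}
```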
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go
new file mode 100644
index 00000000..73fcc453
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go
@@ -0,0 +1,37 @@
+package api
+
+import (
+	"errors"
+	"net/http"
+
+	"github.com/docker/distribution/health"
+)
+
+var (
+	updater = health.NewStatusUpdater()
+)
+
+// DownHandler sets the manual_http_status check to a failing state, taking
+// the service out of rotation. Only POST requests are accepted; any other
+// method returns a 404.
+func DownHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method == "POST" {
+		updater.Update(errors.New("Manual Check"))
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+// UpHandler clears the manual_http_status check, bringing the service back
+// into rotation. Only POST requests are accepted; any other method returns
+// a 404.
+func UpHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method == "POST" {
+		updater.Update(nil)
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+// init sets up the two endpoints to bring the service up and down
+func init() {
+	health.Register("manual_http_status", updater)
+	http.HandleFunc("/debug/health/down", DownHandler)
+	http.HandleFunc("/debug/health/up", UpHandler)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go
new file mode 100644
index 00000000..ec82154f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go
@@ -0,0 +1,86 @@
+package api
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution/health"
+)
+
+// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint
+// /debug/health/down with method GET returns a 404
+func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	DownHandler(recorder, req)
+
+	if recorder.Code != 404 {
+		t.Errorf("Did not get a 404.")
+	}
+}
+
+// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint
+// /debug/health/up with method GET returns a 404
+func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	UpHandler(recorder, req)
+
+	if recorder.Code != 404 {
+		t.Errorf("Did not get a 404.")
+	}
+}
+
+// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes
+// the status code of the response to 503
+// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus
+func TestPOSTDownHandlerChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	DownHandler(recorder, req)
+
+	if recorder.Code != 200 {
+		t.Errorf("Did not get a 200.")
+	}
+
+	if len(health.CheckStatus()) != 1 {
+		t.Errorf("DownHandler didn't add an error check.")
+	}
+}
+
+// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes
+// the status code of the response to 200
+func TestPOSTUpHandlerChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	UpHandler(recorder, req)
+
+	if recorder.Code != 200 {
+		t.Errorf("Did not get a 200.")
+	}
+
+	if len(health.CheckStatus()) != 0 {
+		t.Errorf("UpHandler didn't remove the error check.")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go
new file mode 100644
index 00000000..9de14010
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go
@@ -0,0 +1,35 @@
+package checks
+
+import (
+	"errors"
+	"net/http"
+	"os"
+	"strconv"
+
+	"github.com/docker/distribution/health"
+)
+
+// FileChecker checks the existence of a file and returns an error
+// if the file exists, taking the application out of rotation
+func FileChecker(f string) health.Checker {
+	return health.CheckFunc(func() error {
+		if _, err := os.Stat(f); err == nil {
+			return errors.New("file exists")
+		}
+		return nil
+	})
+}
+
+// HTTPChecker issues a HEAD request and verifies that the HTTP status code
+// returned is 200, taking the application out of rotation otherwise
+func HTTPChecker(r string) health.Checker {
+	return health.CheckFunc(func() error {
+		response, err := http.Head(r)
+		if err != nil {
+			return errors.New("error while checking: " + r)
+		}
+		if response.StatusCode != http.StatusOK {
+			// strconv.Itoa is required here; string(int) would produce a
+			// rune, not the decimal representation of the status code.
+			return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode))
+		}
+		return nil
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go
new file mode 100644
index 00000000..4e49d118
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go
@@ -0,0 +1,25 @@
+package checks
+
+import (
+	"testing"
+)
+
+func TestFileChecker(t *testing.T) {
+	if err := FileChecker("/tmp").Check(); err == nil {
+		t.Errorf("expected an error for existing file /tmp")
+	}
+
+	if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil {
+		t.Errorf("expected no error for missing file NoSuchFileFromMoon, got: %v", err)
+	}
+}
+
+func TestHTTPChecker(t *testing.T) {
+	if err := HTTPChecker("https://www.google.cybertron").Check(); err == nil {
+		t.Errorf("expected an error for unreachable host https://www.google.cybertron")
+	}
+
+	if err := HTTPChecker("https://www.google.pt").Check(); err != nil {
+		t.Errorf("expected no error for https://www.google.pt, got: %v", err)
+	}
+}
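These two checkers are typically wired into the periodic registration helpers documented in `doc.go` below, so the expensive probe runs in the background while `/debug/health` answers instantly. A minimal sketch; the port, file path, and probe URL are illustrative:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/docker/distribution/health"
	"github.com/docker/distribution/health/checks"
)

func main() {
	// Take the service out of rotation when an operator creates
	// /tmp/disable; the file is polled every 5 seconds.
	health.Register("fileChecker",
		health.PeriodicChecker(checks.FileChecker("/tmp/disable"), 5*time.Second))

	// Probe a downstream dependency, tolerating a single failure
	// before reporting unhealthy.
	health.Register("httpChecker",
		health.PeriodicThresholdChecker(checks.HTTPChecker("http://localhost:8080/"), 5*time.Second, 2))

	// The health package's init wires /debug/health into the default mux.
	log.Fatal(http.ListenAndServe(":5001", nil))
}
```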
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go
new file mode 100644
index 00000000..8faa32f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go
@@ -0,0 +1,130 @@
+// Package health provides a generic health checking framework.
+// The health package works expvar style. By importing the package, the debug
+// server gets a "/debug/health" endpoint that returns the current
+// status of the application.
+// If there are no errors, "/debug/health" will return an HTTP 200 status,
+// together with an empty JSON reply "{}". If there are any checks
+// with errors, the JSON reply will include all the failed checks, and the
+// response will have an HTTP 503 status.
+//
+// A Check can either be run synchronously, or asynchronously. We recommend
+// that most checks are registered as asynchronous checks, so a call to the
+// "/debug/health" endpoint always returns immediately. This pattern is
+// particularly useful for checks that verify upstream connectivity or
+// database status, since they might take a long time to return/timeout.
+//
+// Installing
+//
+// To install health, just import it in your application:
+//
+//	import "github.com/docker/distribution/health"
+//
+// You can also (optionally) import "health/api" that will add two convenience
+// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
+// "manual" checks that allow the service to quickly be brought in/out of
+// rotation.
+//
+//	import _ "github.com/docker/distribution/health/api"
+//
+//	# curl localhost:5001/debug/health
+//	{}
+//	# curl -X POST localhost:5001/debug/health/down
+//	# curl localhost:5001/debug/health
+//	{"manual_http_status":"Manual Check"}
+//
+// After importing these packages to your main application, you can start
+// registering checks.
+//
+// Registering Checks
+//
+// The recommended way of registering checks is using a periodic Check.
+// PeriodicChecks run on a certain schedule and asynchronously update the
+// status of the check. This allows CheckStatus to return without blocking
+// on an expensive check.
+//
+// A trivial example of a check that runs every 5 seconds and shuts down our
+// server if the current minute is even, could be added as follows:
+//
+//	func currentMinuteEvenCheck() error {
+//		m := time.Now().Minute()
+//		if m%2 == 0 {
+//			return errors.New("Current minute is even!")
+//		}
+//		return nil
+//	}
+//
+//	health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
+//
+// Alternatively, you can also make use of RegisterPeriodicThresholdFunc to
+// implement the exact same check, but add a threshold of failures after which
+// the check will be unhealthy. This is particularly useful for flaky Checks,
+// ensuring some stability of the service when handling them.
+//
+//	health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
+//
+// The lowest-level way to interact with the health package is calling
+// Register directly. Register allows you to pass in an arbitrary string and
+// something that implements Checker and runs your check. If your method
+// returns nil, it is considered a healthy check, otherwise it
+// will make the health check endpoint "/debug/health" start returning a 503
+// and list the specific check that failed.
+//
+// Assuming you wish to register a method called "currentMinuteEvenCheck()
+// error" you could do that by doing:
+//
+//	health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
+//
+// CheckFunc is a convenience type that implements Checker.
+//
+// Another way of registering a check could be by using an anonymous function
+// and the convenience method RegisterFunc. An example that makes the status
+// endpoint always return an error:
+//
+//	health.RegisterFunc("my_check", func() error {
+//		return errors.New("This is an error!")
+//	})
+//
+// Examples
+//
+// You could also use the health checker mechanism to ensure your application
+// only comes up if certain conditions are met, or to allow the developer to
+// take the service out of rotation immediately.
+// An example that checks database connectivity and immediately takes the
+// server out of rotation on err:
+//
+//	updater = health.NewStatusUpdater()
+//	health.RegisterFunc("database_check", func() error {
+//		return updater.Check()
+//	})
+//
+//	conn, err := Connect(...) // database call here
+//	if err != nil {
+//		updater.Update(errors.New("Error connecting to the database: " + err.Error()))
+//	}
+//
+// You can also use the predefined Checkers that come included with the health
+// package. First, import the checks:
+//
+//	import "github.com/docker/distribution/health/checks"
+//
+// After that you can make use of any of the provided checks. An example of
+// using a FileChecker to take the application out of rotation if a certain
+// file exists can be done as follows:
+//
+//	health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
+//
+// After registering the check, it is trivial to take an application out of
+// rotation from the console:
+//
+//	# curl localhost:5001/debug/health
+//	{}
+//	# touch /tmp/disable
+//	# curl localhost:5001/debug/health
+//	{"fileChecker":"file exists"}
+//
+// You could also test the connectivity to a downstream service by using a
+// HTTPChecker, but ensure that you only mark the test unhealthy if there
+// are a minimum of two failures in a row:
+//
+//	health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
+package health
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go
new file mode 100644
index 00000000..8a4df776
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go
@@ -0,0 +1,217 @@
+package health
+
+import (
+	"encoding/json"
+	"net/http"
+	"sync"
+	"time"
+)
+
+var (
+	mutex            sync.RWMutex
+	registeredChecks = make(map[string]Checker)
+)
+
+// Checker is the interface for a Health Checker
+type Checker interface {
+	// Check returns nil if the service is okay.
+	Check() error
+}
+
+// CheckFunc is a convenience type to create functions that implement
+// the Checker interface
+type CheckFunc func() error
+
+// Check implements the Checker interface to allow for any func() error method
+// to be passed as a Checker
+func (cf CheckFunc) Check() error {
+	return cf()
+}
+
+// Updater implements a health check that is explicitly set.
+type Updater interface {
+	Checker
+
+	// Update updates the current status of the health check.
+	Update(status error)
+}
+
+// updater implements Checker and Updater, providing an asynchronous Update
+// method.
+// This allows us to have a Checker that returns the Check() call immediately
+// not blocking on a potentially expensive check.
+type updater struct {
+	mu     sync.Mutex
+	status error
+}
+
+// Check implements the Checker interface
+func (u *updater) Check() error {
+	u.mu.Lock()
+	defer u.mu.Unlock()
+
+	return u.status
+}
+
+// Update implements the Updater interface, allowing asynchronous access to
+// the status of a Checker.
+func (u *updater) Update(status error) {
+	u.mu.Lock()
+	defer u.mu.Unlock()
+
+	u.status = status
+}
+
+// NewStatusUpdater returns a new updater
+func NewStatusUpdater() Updater {
+	return &updater{}
+}
+
+// thresholdUpdater implements Checker and Updater, providing an asynchronous Update
+// method.
+// This allows us to have a Checker that returns the Check() call immediately
+// not blocking on a potentially expensive check.
+type thresholdUpdater struct {
+	mu        sync.Mutex
+	status    error
+	threshold int
+	count     int
+}
+
+// Check implements the Checker interface
+func (tu *thresholdUpdater) Check() error {
+	tu.mu.Lock()
+	defer tu.mu.Unlock()
+
+	if tu.count >= tu.threshold {
+		return tu.status
+	}
+
+	return nil
+}
+
+// Update implements the Updater interface, allowing asynchronous
+// access to the status of a Checker.
+func (tu *thresholdUpdater) Update(status error) {
+	tu.mu.Lock()
+	defer tu.mu.Unlock()
+
+	if status == nil {
+		tu.count = 0
+	} else if tu.count < tu.threshold {
+		tu.count++
+	}
+
+	tu.status = status
+}
+
+// NewThresholdStatusUpdater returns a new thresholdUpdater
+func NewThresholdStatusUpdater(t int) Updater {
+	return &thresholdUpdater{threshold: t}
+}
+
+// PeriodicChecker wraps an updater to provide a periodic checker
+func PeriodicChecker(check Checker, period time.Duration) Checker {
+	u := NewStatusUpdater()
+	go func() {
+		t := time.NewTicker(period)
+		for {
+			<-t.C
+			u.Update(check.Check())
+		}
+	}()
+
+	return u
+}
+
+// PeriodicThresholdChecker wraps an updater to provide a periodic checker that
+// uses a threshold before it changes status
+func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker {
+	tu := NewThresholdStatusUpdater(threshold)
+	go func() {
+		t := time.NewTicker(period)
+		for {
+			<-t.C
+			tu.Update(check.Check())
+		}
+	}()
+
+	return tu
+}
+
+// CheckStatus returns a map with all the current health check errors
+func CheckStatus() map[string]string {
+	mutex.RLock()
+	defer mutex.RUnlock()
+	statusKeys := make(map[string]string)
+	for k, v := range registeredChecks {
+		err := v.Check()
+		if err != nil {
+			statusKeys[k] = err.Error()
+		}
+	}
+
+	return statusKeys
+}
+
+// Register associates the checker with the provided name. It panics if a
+// check is already registered under that name.
+func Register(name string, check Checker) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	_, ok := registeredChecks[name]
+	if ok {
+		panic("Check already exists: " + name)
+	}
+	registeredChecks[name] = check
+}
+
+// RegisterFunc allows the convenience of registering a checker directly
+// from an arbitrary func() error
+func RegisterFunc(name string, check func() error) {
+	Register(name, CheckFunc(check))
+}
+
+// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
+// from an arbitrary func() error
+func RegisterPeriodicFunc(name string, check func() error, period time.Duration) {
+	Register(name, PeriodicChecker(CheckFunc(check), period))
+}
+
+// RegisterPeriodicThresholdFunc allows the convenience of registering a
+// PeriodicChecker from an arbitrary func() error
+func RegisterPeriodicThresholdFunc(name string, check func() error, period time.Duration, threshold int) {
+	Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold))
+}
+
+// StatusHandler returns a JSON blob with all the currently registered Health Checks
+// and their corresponding status.
+//
+// Returns 503 if any Error status exists, 200 otherwise
+func StatusHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method == "GET" {
+		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+		checksStatus := CheckStatus()
+		// If there is an error, return 503
+		if len(checksStatus) != 0 {
+			w.WriteHeader(http.StatusServiceUnavailable)
+		}
+		encoder := json.NewEncoder(w)
+		err := encoder.Encode(checksStatus)
+
+		// Encoding of the JSON failed; return a generic error message
+		if err != nil {
+			encoder.Encode(struct {
+				ServerError string `json:"server_error"`
+			}{
+				ServerError: "Could not parse error message",
+			})
+		}
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+// init registers the global /debug/health API endpoint
+func init() {
+	http.HandleFunc("/debug/health", StatusHandler)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go
new file mode 100644
index 00000000..7989f0b2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go
@@ -0,0 +1,47 @@
+package health
+
+import (
+	"errors"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+// TestReturns200IfThereAreNoChecks ensures that the result code of the health
+// endpoint is 200 if there are no currently registered checks.
+func TestReturns200IfThereAreNoChecks(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	StatusHandler(recorder, req)
+
+	if recorder.Code != 200 {
+		t.Errorf("Did not get a 200.")
+	}
+}
+
+// TestReturns503IfThereAreErrorChecks ensures that the result code of the
+// health endpoint is 503 if there are health checks with errors
+func TestReturns503IfThereAreErrorChecks(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	// Create a manual error
+	Register("some_check", CheckFunc(func() error {
+		return errors.New("This Check did not succeed")
+	}))
+
+	StatusHandler(recorder, req)
+
+	if recorder.Code != 503 {
+		t.Errorf("Did not get a 503.")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go
new file mode 100644
index 00000000..48467d48
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go
@@ -0,0 +1,124 @@
+package manifest
+
+import (
+	"encoding/json"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/libtrust"
+)
+
+// TODO(stevvooe): When we rev the manifest format, the contents of this
+// package should be moved to manifest/v1.
+
+const (
+	// ManifestMediaType specifies the mediaType for the current version. Note
+	// that for schema version 1, the media type is optionally
+	// "application/json".
+	ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json"
+)
+
+// Versioned provides a struct with just the manifest schemaVersion. Incoming
+// content with unknown schema version can be decoded against this struct to
+// check the version.
+type Versioned struct {
+	// SchemaVersion is the image manifest schema that this image follows
+	SchemaVersion int `json:"schemaVersion"`
+}
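A short sketch of the version-sniffing pattern described above; the inline JSON is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/manifest"
)

func main() {
	raw := []byte(`{"schemaVersion": 1, "name": "library/test", "tag": "latest"}`)

	// Decode against Versioned first to learn the schema version before
	// committing to a full unmarshal of the manifest body.
	var v manifest.Versioned
	if err := json.Unmarshal(raw, &v); err != nil {
		panic(err)
	}
	fmt.Println("schema version:", v.SchemaVersion) // prints: schema version: 1
}
```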
+
+// Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type Manifest struct {
+	Versioned
+
+	// Name is the name of the image's repository
+	Name string `json:"name"`
+
+	// Tag is the tag of the image specified by this manifest
+	Tag string `json:"tag"`
+
+	// Architecture is the host architecture on which this image is intended to
+	// run
+	Architecture string `json:"architecture"`
+
+	// FSLayers is a list of filesystem layer blobSums contained in this image
+	FSLayers []FSLayer `json:"fsLayers"`
+
+	// History is a list of unstructured historical data for v1 compatibility
+	History []History `json:"history"`
+}
+
+// SignedManifest provides an envelope for a signed image manifest, including
+// the format-sensitive raw bytes.
+type SignedManifest struct {
+	Manifest
+
+	// Raw is the byte representation of the ImageManifest, used for signature
+	// verification. The value of Raw must be used directly during
+	// serialization, or the signature check will fail. The manifest byte
+	// representation cannot change or it will have to be re-signed.
+	Raw []byte `json:"-"`
+}
+
+// UnmarshalJSON populates a new SignedManifest struct from JSON data.
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
+	var manifest Manifest
+	if err := json.Unmarshal(b, &manifest); err != nil {
+		return err
+	}
+
+	sm.Manifest = manifest
+	sm.Raw = make([]byte, len(b))
+	copy(sm.Raw, b)
+
+	return nil
+}
+
+// Payload returns the raw, signed content of the signed manifest. The
+// contents can be used to calculate the content identifier.
+func (sm *SignedManifest) Payload() ([]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the payload in the manifest.
+	return jsig.Payload()
+}
+
+// Signatures returns the signatures as provided by
+// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
+// signatures.
+func (sm *SignedManifest) Signatures() ([][]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the payload in the manifest.
+	return jsig.Signatures()
+}
+
+// MarshalJSON returns the contents of Raw. If Raw is empty, it marshals the
+// inner contents. Applications requiring a marshaled signed manifest should
+// simply use Raw directly, since the content produced by json.Marshal will be
+// compacted and will fail signature checks.
+func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
+	if len(sm.Raw) > 0 {
+		return sm.Raw, nil
+	}
+
+	// If the raw data is not available, just dump the inner content.
+ return json.Marshal(&sm.Manifest) +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest_test.go new file mode 100644 index 00000000..941bfde9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest_test.go @@ -0,0 +1,110 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/libtrust" +) + +type testEnv struct { + name, tag string + manifest *Manifest + signed *SignedManifest + pk libtrust.PrivateKey +} + +func TestManifestMarshaling(t *testing.T) { + env := genEnv(t) + + // Check that the Raw field is the same as json.MarshalIndent with these + // parameters. + p, err := json.MarshalIndent(env.signed, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + + if !bytes.Equal(p, env.signed.Raw) { + t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.Raw), string(p)) + } +} + +func TestManifestUnmarshaling(t *testing.T) { + env := genEnv(t) + + var signed SignedManifest + if err := json.Unmarshal(env.signed.Raw, &signed); err != nil { + t.Fatalf("error unmarshaling signed manifest: %v", err) + } + + if !reflect.DeepEqual(&signed, env.signed) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) + } +} + +func TestManifestVerification(t *testing.T) { + env := genEnv(t) + + publicKeys, err := Verify(env.signed) + if err != nil { + t.Fatalf("error verifying manifest: %v", err) + } + + if len(publicKeys) == 0 { + t.Fatalf("no public keys found in signature") + } + + var found bool + publicKey := env.pk.PublicKey() + // ensure that one of the extracted public keys matches the private key. + for _, candidate := range publicKeys { + if candidate.KeyID() == publicKey.KeyID() { + found = true + break + } + } + + if !found { + t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys) + } +} + +func genEnv(t *testing.T) *testEnv { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating test key: %v", err) + } + + name, tag := "foo/bar", "test" + + m := Manifest{ + Versioned: Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + FSLayers: []FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + sm, err := Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + return &testEnv{ + name: name, + tag: tag, + manifest: &m, + signed: sm, + pk: pk, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/sign.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/sign.go new file mode 100644 index 00000000..a4c37652 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/sign.go @@ -0,0 +1,66 @@ +package manifest + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. 
This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + Raw: pretty, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. +func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + Raw: pretty, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/verify.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/verify.go new file mode 100644 index 00000000..3e051b26 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/verify.go @@ -0,0 +1,32 @@ +package manifest + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. +func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go new file mode 100644 index 00000000..b97925a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go @@ -0,0 +1,155 @@ +package notifications + +import ( + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/uuid" +) + +type bridge struct { + ub URLBuilder + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink +} + +var _ Listener = &bridge{} + +// URLBuilder defines a subset of url builder to be used by the event listener. 
+type URLBuilder interface { + BuildManifestURL(name, tag string) (string, error) + BuildBlobURL(name string, dgst digest.Digest) (string, error) +} + +// NewBridge returns a notification listener that writes records to sink, +// using the actor and source. Any urls populated in the events created by +// this bridge will be created using the URLBuilder. +// TODO(stevvooe): Update this to simply take a context.Context object. +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { + return &bridge{ + ub: ub, + actor: actor, + source: source, + request: request, + sink: sink, + } +} + +// NewRequestRecord builds a RequestRecord for use in NewBridge from an +// http.Request, associating it with a request id. +func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: context.RemoteAddr(r), + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo string, sm *manifest.SignedManifest) error { + return b.createManifestEventAndWrite(EventActionPush, repo, sm) +} + +func (b *bridge) ManifestPulled(repo string, sm *manifest.SignedManifest) error { + return b.createManifestEventAndWrite(EventActionPull, repo, sm) +} + +func (b *bridge) ManifestDeleted(repo string, sm *manifest.SignedManifest) error { + return b.createManifestEventAndWrite(EventActionDelete, repo, sm) +} + +func (b *bridge) BlobPushed(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) +} + +func (b *bridge) BlobPulled(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) +} + +func (b *bridge) BlobDeleted(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionDelete, repo, desc) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo string, sm *manifest.SignedManifest) error { + manifestEvent, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) createManifestEvent(action string, repo string, sm *manifest.SignedManifest) (*Event, error) { + event := b.createEvent(action) + event.Target.MediaType = manifest.ManifestMediaType + event.Target.Repository = repo + + p, err := sm.Payload() + if err != nil { + return nil, err + } + + event.Target.Length = int64(len(p)) + event.Target.Size = int64(len(p)) + event.Target.Digest, err = digest.FromBytes(p) + if err != nil { + return nil, err + } + + event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, event.Target.Digest.String()) + if err != nil { + return nil, err + } + + return event, nil +} + +func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distribution.Descriptor) error { + event, err := b.createBlobEvent(action, repo, desc) + if err != nil { + return err + } + + return b.sink.Write(*event) +} + +func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) { + event := b.createEvent(action) + event.Target.Descriptor = desc + event.Target.Length = desc.Size + event.Target.Repository = repo + + var err error + event.Target.URL, err = b.ub.BuildBlobURL(repo, desc.Digest) + if err != nil { + return nil, err + } + + return event, nil +} + +// createEvent creates an event with actor and source populated. 
+func (b *bridge) createEvent(action string) *Event { + event := createEvent(action) + event.Source = b.source + event.Actor = b.actor + event.Request = b.request + + return event +} + +// createEvent returns a new event, timestamped, with the specified action. +func createEvent(action string) *Event { + return &Event{ + ID: uuid.Generate().String(), + Timestamp: time.Now(), + Action: action, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go new file mode 100644 index 00000000..fbf557d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go @@ -0,0 +1,166 @@ +package notifications + +import ( + "testing" + + "github.com/docker/distribution/digest" + + "github.com/docker/libtrust" + + "github.com/docker/distribution/manifest" + + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/uuid" +) + +var ( + // common environment for expected manifest events. + + repo = "test/repo" + source = SourceRecord{ + Addr: "remote.test", + InstanceID: uuid.Generate().String(), + } + ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/")) + + actor = ActorRecord{ + Name: "test", + } + request = RequestRecord{} + m = manifest.Manifest{ + Name: repo, + Tag: "latest", + } + + sm *manifest.SignedManifest + payload []byte + dgst digest.Digest +) + +func TestEventBridgeManifestPulled(t *testing.T) { + + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPull, events...) + + return nil + })) + + if err := l.ManifestPulled(repo, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPushed(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) + + return nil + })) + + if err := l.ManifestPushed(repo, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionDelete, events...) + + return nil + })) + + if err := l.ManifestDeleted(repo, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func createTestEnv(t *testing.T, fn testSinkFn) Listener { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating private key: %v", err) + } + + sm, err = manifest.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + payload, err = sm.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + dgst, err = digest.FromBytes(payload) + if err != nil { + t.Fatalf("error digesting manifest payload: %v", err) + } + + return NewBridge(ub, source, actor, request, fn) +} + +func checkCommonManifest(t *testing.T, action string, events ...Event) { + checkCommon(t, events...) 
+
+	event := events[0]
+	if event.Action != action {
+		t.Fatalf("unexpected event action: %q != %q", event.Action, action)
+	}
+
+	u, err := ub.BuildManifestURL(repo, dgst.String())
+	if err != nil {
+		t.Fatalf("error building expected url: %v", err)
+	}
+
+	if event.Target.URL != u {
+		t.Fatalf("incorrect url passed: %q != %q", event.Target.URL, u)
+	}
+}
+
+func checkCommon(t *testing.T, events ...Event) {
+	if len(events) != 1 {
+		t.Fatalf("unexpected number of events: %v != 1", len(events))
+	}
+
+	event := events[0]
+
+	if event.Source != source {
+		t.Fatalf("source not equal: %#v != %#v", event.Source, source)
+	}
+
+	if event.Request != request {
+		t.Fatalf("request not equal: %#v != %#v", event.Request, request)
+	}
+
+	if event.Actor != actor {
+		t.Fatalf("actor not equal: %#v != %#v", event.Actor, actor)
+	}
+
+	if event.Target.Digest != dgst {
+		t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst)
+	}
+
+	if event.Target.Length != int64(len(payload)) {
+		t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload))
+	}
+
+	if event.Target.Repository != repo {
+		t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo)
+	}
+}
+
+type testSinkFn func(events ...Event) error
+
+func (tsf testSinkFn) Write(events ...Event) error {
+	return tsf(events...)
+}
+
+func (tsf testSinkFn) Close() error { return nil }
+
+func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder {
+	if err != nil {
+		panic(err)
+	}
+
+	return ub
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go
new file mode 100644
index 00000000..dfdb111c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go
@@ -0,0 +1,86 @@
+package notifications
+
+import (
+	"net/http"
+	"time"
+)
+
+// EndpointConfig covers the optional configuration parameters for an active
+// endpoint.
+type EndpointConfig struct {
+	Headers   http.Header
+	Timeout   time.Duration
+	Threshold int
+	Backoff   time.Duration
+}
+
+// defaults sets any zero-valued fields to a reasonable default.
+func (ec *EndpointConfig) defaults() {
+	if ec.Timeout <= 0 {
+		ec.Timeout = time.Second
+	}
+
+	if ec.Threshold <= 0 {
+		ec.Threshold = 10
+	}
+
+	if ec.Backoff <= 0 {
+		ec.Backoff = time.Second
+	}
+}
+
+// Endpoint is a reliable, queued, thread-safe sink that notifies external
+// http services when events are written. Writes are non-blocking and always
+// succeed for callers but events may be queued internally.
+type Endpoint struct {
+	Sink
+	url  string
+	name string
+
+	EndpointConfig
+
+	metrics *safeMetrics
+}
+
+// NewEndpoint returns a running endpoint, ready to receive events.
+func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {
+	var endpoint Endpoint
+	endpoint.name = name
+	endpoint.url = url
+	endpoint.EndpointConfig = config
+	endpoint.defaults()
+	endpoint.metrics = newSafeMetrics()
+
+	// Configures the inmemory queue, retry, http pipeline.
+	endpoint.Sink = newHTTPSink(
+		endpoint.url, endpoint.Timeout, endpoint.Headers,
+		endpoint.metrics.httpStatusListener())
+	endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff)
+	endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())
+
+	register(&endpoint)
+	return &endpoint
+}
+
+// Name returns the name of the endpoint, generally used for debugging.
+func (e *Endpoint) Name() string {
+	return e.name
+}
+
+// URL returns the url of the endpoint.
+func (e *Endpoint) URL() string {
+	return e.url
+}
+
+// ReadMetrics populates em with metrics from the endpoint.
+func (e *Endpoint) ReadMetrics(em *EndpointMetrics) {
+	e.metrics.Lock()
+	defer e.metrics.Unlock()
+
+	*em = e.metrics.EndpointMetrics
+	// The Statuses map still needs to be copied in a threadsafe manner.
+	em.Statuses = make(map[string]int)
+	for k, v := range e.metrics.Statuses {
+		em.Statuses[k] = v
+	}
+}
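A sketch of constructing an endpoint and polling its delivery counters. This assumes the `EndpointMetrics` fields (`Pending`, `Successes`, `Failures`, `Errors`) defined in the package's `metrics.go`, which is outside this hunk; the URL, header, and tuning values are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/distribution/notifications"
)

func main() {
	// NewEndpoint layers the queueing and retrying sinks over the raw
	// HTTP sink, so Write calls never block the caller.
	endpoint := notifications.NewEndpoint("audit", "http://localhost:9000/events",
		notifications.EndpointConfig{
			Timeout:   2 * time.Second,
			Threshold: 5,
			Backoff:   time.Second,
			Headers:   http.Header{"Authorization": []string{"Bearer example-token"}},
		})
	defer endpoint.Close()

	// Periodically inspect delivery metrics, e.g. for an admin endpoint.
	var em notifications.EndpointMetrics
	endpoint.ReadMetrics(&em)
	fmt.Printf("pending=%d successes=%d failures=%d errors=%d\n",
		em.Pending, em.Successes, em.Failures, em.Errors)
}
```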
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go
new file mode 100644
index 00000000..97030026
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go
@@ -0,0 +1,152 @@
+package notifications
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/distribution"
+)
+
+// EventAction constants used in action field of Event.
+const (
+	EventActionPull   = "pull"
+	EventActionPush   = "push"
+	EventActionDelete = "delete"
+)
+
+const (
+	// EventsMediaType is the mediatype for the json event envelope. If the
+	// Event, ActorRecord, SourceRecord or Envelope structs change, the version
+	// number should be incremented.
+	EventsMediaType = "application/vnd.docker.distribution.events.v1+json"
+	// layerMediaType is the media type for image rootfs diffs (aka "layers")
+	// used by Docker. We don't expect this to change for quite a while.
+	layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+// Envelope defines the fields of a json event envelope message that can hold
+// one or more events.
+type Envelope struct {
+	// Events make up the contents of the envelope. Events present in a single
+	// envelope are not necessarily related.
+	Events []Event `json:"events,omitempty"`
+}
+
+// TODO(stevvooe): The event type should be separate from the json format. It
+// should be defined as an interface. Leaving as is for now since we don't
+// need that at this time. If we make this change, the struct below would be
+// called "EventRecord".
+
+// Event provides the fields required to describe a registry event.
+type Event struct {
+	// ID provides a unique identifier for the event.
+	ID string `json:"id,omitempty"`
+
+	// Timestamp is the time at which the event occurred.
+	Timestamp time.Time `json:"timestamp,omitempty"`
+
+	// Action indicates what action encompasses the provided event.
+	Action string `json:"action,omitempty"`
+
+	// Target uniquely describes the target of the event.
+	Target struct {
+		// TODO(stevvooe): Use http.DetectContentType for layers, maybe.
+
+		distribution.Descriptor
+
+		// Length in bytes of content. Same as Size field in Descriptor.
+		// Provided for backwards compatibility.
+		Length int64 `json:"length,omitempty"`
+
+		// Repository identifies the named repository.
+		Repository string `json:"repository,omitempty"`
+
+		// URL provides a direct link to the content.
+		URL string `json:"url,omitempty"`
+	} `json:"target,omitempty"`
+
+	// Request covers the request that generated the event.
+	Request RequestRecord `json:"request,omitempty"`
+
+	// Actor specifies the agent that initiated the event. For most
+	// situations, this could be from the authorization context of the request.
+	Actor ActorRecord `json:"actor,omitempty"`
+
+	// Source identifies the registry node that generated the event. Put
+	// differently, while the actor "initiates" the event, the source
+	// "generates" it.
+	Source SourceRecord `json:"source,omitempty"`
+}
+
+// ActorRecord specifies the agent that initiated the event. For most
+// situations, this could be from the authorization context of the request.
+// Data in this record can refer to both the initiating client and the
+// generating request.
+type ActorRecord struct {
+	// Name corresponds to the subject or username associated with the
+	// request context that generated the event.
+	Name string `json:"name,omitempty"`
+
+	// TODO(stevvooe): Look into setting a session cookie to get this
+	// without docker daemon.
+	// SessionID
+
+	// TODO(stevvooe): Push the "Docker-Command" header to replace cookie and
+	// get the actual command.
+	// Command
+}
+
+// RequestRecord covers the request that generated the event.
+type RequestRecord struct {
+	// ID uniquely identifies the request that initiated the event.
+	ID string `json:"id"`
+
+	// Addr contains the ip or hostname and possibly port of the client
+	// connection that initiated the event. This is the RemoteAddr from
+	// the standard http request.
+	Addr string `json:"addr,omitempty"`
+
+	// Host is the externally accessible host name of the registry instance,
+	// as specified by the http host header on incoming requests.
+	Host string `json:"host,omitempty"`
+
+	// Method has the request method that generated the event.
+	Method string `json:"method"`
+
+	// UserAgent contains the user agent header of the request.
+	UserAgent string `json:"useragent"`
+}
+
+// SourceRecord identifies the registry node that generated the event. Put
+// differently, while the actor "initiates" the event, the source "generates"
+// it.
+type SourceRecord struct {
+	// Addr contains the ip or hostname and the port of the registry node
+	// that generated the event. Generally, this will be resolved by
+	// os.Hostname() along with the running port.
+	Addr string `json:"addr,omitempty"`
+
+	// InstanceID identifies a running instance of an application. Changes
+	// after each restart.
+	InstanceID string `json:"instanceID,omitempty"`
+}
+
+var (
+	// ErrSinkClosed is returned if a write is issued to a sink that has been
+	// closed. If encountered, the error should be considered terminal and
+	// retries will not be successful.
+	ErrSinkClosed = fmt.Errorf("sink: closed")
+)
+
+// Sink accepts and sends events.
+type Sink interface {
+	// Write writes one or more events to the sink. If no error is returned,
+	// the caller will assume that all events have been committed and will not
+	// try to send them again. If an error is received, the caller may retry
+	// sending the event. The caller should cede the slice of memory to the
+	// sink and not modify it after calling this method.
+	Write(events ...Event) error
+
+	// Close the sink, possibly waiting for pending events to flush.
+	Close() error
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go
new file mode 100644
index 00000000..ac4dfd93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go
@@ -0,0 +1,157 @@
+package notifications
+
+import (
+	"encoding/json"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution/manifest"
+)
+
+// TestEventEnvelopeJSONFormat provides a simple test to detect if the event
+// format or envelope has changed. If this code fails, the revision of the
+// protocol may need to be incremented.
+func TestEventEnvelopeJSONFormat(t *testing.T) { + var expected = strings.TrimSpace(` +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+json", + "size": 1, + "digest": "sha256:0123456789abcdef0", + "length": 1, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 2, + "digest": "tarsum.v2+sha256:0123456789abcdef1", + "length": 2, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 3, + "digest": "tarsum.v2+sha256:0123456789abcdef2", + "length": 3, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} + `) + + tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) + if err != nil { + t.Fatalf("error creating time: %v", err) + } + + var prototype Event + prototype.Action = EventActionPush + prototype.Timestamp = tm + prototype.Actor.Name = "test-actor" + prototype.Request.ID = "asdfasdf" + prototype.Request.Addr = "client.local" + prototype.Request.Host = "registrycluster.local" + prototype.Request.Method = "PUT" + prototype.Request.UserAgent = "test/0.1" + prototype.Source.Addr = "hostname.local:port" + + var manifestPush Event + manifestPush = prototype + manifestPush.ID = "asdf-asdf-asdf-asdf-0" + manifestPush.Target.Digest = "sha256:0123456789abcdef0" + manifestPush.Target.Length = 1 + manifestPush.Target.Size = 1 + manifestPush.Target.MediaType = manifest.ManifestMediaType + manifestPush.Target.Repository = "library/test" + manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush0 Event + layerPush0 = prototype + layerPush0.ID = "asdf-asdf-asdf-asdf-1" + layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1" + layerPush0.Target.Length = 2 + layerPush0.Target.Size = 2 + layerPush0.Target.MediaType = layerMediaType + layerPush0.Target.Repository = "library/test" + layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush1 Event + layerPush1 = prototype + layerPush1.ID = "asdf-asdf-asdf-asdf-2" + layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2" + layerPush1.Target.Length = 3 + layerPush1.Target.Size = 3 + layerPush1.Target.MediaType = layerMediaType + layerPush1.Target.Repository = 
"library/test" + layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var envelope Envelope + envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling envelope: %v", err) + } + if string(p) != expected { + t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go new file mode 100644 index 00000000..465434f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go @@ -0,0 +1,147 @@ +package notifications + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" +) + +// httpSink implements a single-flight, http notification endpoint. This is +// very lightweight in that it only makes an attempt at an http request. +// Reliability should be provided by the caller. +type httpSink struct { + url string + + mu sync.Mutex + closed bool + client *http.Client + listeners []httpStatusListener + + // TODO(stevvooe): Allow one to configure the media type accepted by this + // sink and choose the serialization based on that. +} + +// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other +// sinks for increased reliability. +func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { + return &httpSink{ + url: u, + listeners: listeners, + client: &http.Client{ + Transport: &headerRoundTripper{ + Transport: http.DefaultTransport.(*http.Transport), + headers: headers, + }, + Timeout: timeout, + }, + } +} + +// httpStatusListener is called on various outcomes of sending notifications. +type httpStatusListener interface { + success(status int, events ...Event) + failure(status int, events ...Event) + err(err error, events ...Event) +} + +// Accept makes an attempt to notify the endpoint, returning an error if it +// fails. It is the caller's responsibility to retry on error. The events are +// accepted or rejected as a group. +func (hs *httpSink) Write(events ...Event) error { + hs.mu.Lock() + defer hs.mu.Unlock() + defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() + + if hs.closed { + return ErrSinkClosed + } + + envelope := Envelope{ + Events: events, + } + + // TODO(stevvooe): It is not ideal to keep re-encoding the request body on + // retry but we are going to do it to keep the code simple. It is likely + // we could change the event struct to manage its own buffer. + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) + } + + body := bytes.NewReader(p) + resp, err := hs.client.Post(hs.url, EventsMediaType, body) + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + + return fmt.Errorf("%v: error posting: %v", hs, err) + } + defer resp.Body.Close() + + // The notifier will treat any 2xx or 3xx response as accepted by the + // endpoint. + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + for _, listener := range hs.listeners { + listener.success(resp.StatusCode, events...) 
+ } + + // TODO(stevvooe): This is a little accepting: we may want to support + // unsupported media type responses with retries using the correct + // media type. There may also be cases that will never work. + + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go new file mode 100644 index 00000000..e0276ccd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go @@ -0,0 +1,157 @@ +package notifications + +import ( + "encoding/json" + "fmt" + "mime" + "net/http" + "net/http/httptest" + "reflect" + "strconv" + "testing" + + "github.com/docker/distribution/manifest" +) + +// TestHTTPSink mocks out an http endpoint and notifies it under a couple of +// conditions, ensuring correct behavior. +func TestHTTPSink(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + if r.Method != "POST" { + w.WriteHeader(http.StatusMethodNotAllowed) + t.Fatalf("unexpected request method: %v", r.Method) + return + } + + // Extract the content type and make sure it matches + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) + return + } + + if mediaType != EventsMediaType { + w.WriteHeader(http.StatusUnsupportedMediaType) + t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) + return + } + + var envelope Envelope + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&envelope); err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error decoding request body: %v", err) + return + } + + // Let caller choose the status + status, err := strconv.Atoi(r.FormValue("status")) + if err != nil { + t.Logf("error parsing status: %v", err) + + // May just be empty, set status to 200 + status = http.StatusOK + } + + w.WriteHeader(status) + })) + + metrics := newSafeMetrics() + sink := newHTTPSink(server.URL, 0, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + + var expectedMetrics EndpointMetrics + expectedMetrics.Statuses = make(map[string]int) + + for _, tc := range []struct { + events []Event // events to send + url string + failure bool // true if there should be a failure. + statusCode int // if not set, no status code should be incremented. 
+ }{ + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", manifest.ManifestMediaType)}, + }, + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", manifest.ManifestMediaType), + createTestEvent("push", "library/test", layerMediaType), + createTestEvent("push", "library/test", layerMediaType), + }, + }, + { + statusCode: http.StatusTemporaryRedirect, + }, + { + statusCode: http.StatusBadRequest, + failure: true, + }, + { + // Case where connection never goes through. + url: "http://shoudlntresolve/", + failure: true, + }, + } { + + if tc.failure { + expectedMetrics.Failures += len(tc.events) + } else { + expectedMetrics.Successes += len(tc.events) + } + + if tc.statusCode > 0 { + expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) + } + + url := tc.url + if url == "" { + url = server.URL + "/" + } + // setup endpoint to respond with expected status code. + url += fmt.Sprintf("?status=%v", tc.statusCode) + sink.url = url + + t.Logf("testcase: %v, fail=%v", url, tc.failure) + // Try a simple event emission. + err := sink.Write(tc.events...) + + if !tc.failure { + if err != nil { + t.Fatalf("unexpected error sending event: %v", err) + } + } else { + if err == nil { + t.Fatalf("the endpoint should have rejected the request") + } + } + + if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { + t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) + } + } + + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // double close returns error + if err := sink.Close(); err == nil { + t.Fatalf("second close should have returned error: %v", err) + } + +} + +func createTestEvent(action, repo, typ string) Event { + event := createEvent(action) + + event.Target.MediaType = typ + event.Target.Repository = repo + + return *event +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go new file mode 100644 index 00000000..b86fa8a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go @@ -0,0 +1,205 @@ +package notifications + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// ManifestListener describes a set of methods for listening to events related to manifests. +type ManifestListener interface { + ManifestPushed(repo string, sm *manifest.SignedManifest) error + ManifestPulled(repo string, sm *manifest.SignedManifest) error + + // TODO(stevvooe): Please note that delete support is still a little shaky + // and we'll need to propagate these in the future. + + ManifestDeleted(repo string, sm *manifest.SignedManifest) error +} + +// BlobListener describes a listener that can respond to layer related events. +type BlobListener interface { + BlobPushed(repo string, desc distribution.Descriptor) error + BlobPulled(repo string, desc distribution.Descriptor) error + + // TODO(stevvooe): Please note that delete support is still a little shaky + // and we'll need to propagate these in the future. + + BlobDeleted(repo string, desc distribution.Descriptor) error +} + +// Listener combines all repository events into a single interface.
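As a sketch of what implementing Listener takes, a listener that only counts operations, much like the testListener used in the tests further below, could look like this (the countingListener name is hypothetical and not part of this patch):

type countingListener struct {
	pushes, pulls int
}

func (c *countingListener) ManifestPushed(repo string, sm *manifest.SignedManifest) error {
	c.pushes++
	return nil
}

func (c *countingListener) ManifestPulled(repo string, sm *manifest.SignedManifest) error {
	c.pulls++
	return nil
}

func (c *countingListener) ManifestDeleted(repo string, sm *manifest.SignedManifest) error {
	return nil // deletes not propagated yet, per the TODOs above
}

func (c *countingListener) BlobPushed(repo string, desc distribution.Descriptor) error {
	c.pushes++
	return nil
}

func (c *countingListener) BlobPulled(repo string, desc distribution.Descriptor) error {
	c.pulls++
	return nil
}

func (c *countingListener) BlobDeleted(repo string, desc distribution.Descriptor) error {
	return nil
}

Wrapping a repository is then a single call: repo = Listen(repo, &countingListener{}).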
+type Listener interface { + ManifestListener + BlobListener +} + +type repositoryListener struct { + distribution.Repository + listener Listener +} + +// Listen dispatches events on the repository to the listener. +func Listen(repo distribution.Repository, listener Listener) distribution.Repository { + return &repositoryListener{ + Repository: repo, + listener: listener, + } +} + +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return &manifestServiceListener{ + ManifestService: manifests, + parent: rl, + }, nil +} + +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, + } +} + +type manifestServiceListener struct { + distribution.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + sm, err := msl.ManifestService.Get(dgst) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { + logrus.Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(sm *manifest.SignedManifest) error { + err := msl.ManifestService.Put(sm) + + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { + logrus.Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return err +} + +func (msl *manifestServiceListener) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + sm, err := msl.ManifestService.GetByTag(tag, options...) 
+ if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { + logrus.Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +type blobServiceListener struct { + distribution.BlobStore + parent *repositoryListener +} + +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) + if err == nil { + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + } + } + + return desc, err +} + +func (bsl *blobServiceListener) Create(ctx context.Context) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx) + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Resume(ctx, id) + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { + return &blobWriterListener{ + BlobWriter: wr, + parent: bsl, + } +} + +type blobWriterListener struct { + distribution.BlobWriter + parent *blobServiceListener +} + +func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + committed, err := bwl.BlobWriter.Commit(ctx, desc) + if err == nil { + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) + } + } + + return committed, err +} diff --git
a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go new file mode 100644 index 00000000..ccd84593 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go @@ -0,0 +1,185 @@ +package notifications + +import ( + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +func TestListener(t *testing.T) { + ctx := context.Background() + registry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + tl := &testListener{ + ops: make(map[string]int), + } + + repository, err := registry.Repository(ctx, "foo/bar") + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + repository = Listen(repository, tl) + + // Now take the registry through a number of operations + checkExerciseRepository(t, repository) + + expectedOps := map[string]int{ + "manifest:push": 1, + "manifest:pull": 2, + // "manifest:delete": 0, // deletes not supported for now + "layer:push": 2, + "layer:pull": 2, + // "layer:delete": 0, // deletes not supported for now + } + + if !reflect.DeepEqual(tl.ops, expectedOps) { + t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) + } + +} + +type testListener struct { + ops map[string]int +} + +func (tl *testListener) ManifestPushed(repo string, sm *manifest.SignedManifest) error { + tl.ops["manifest:push"]++ + + return nil +} + +func (tl *testListener) ManifestPulled(repo string, sm *manifest.SignedManifest) error { + tl.ops["manifest:pull"]++ + return nil +} + +func (tl *testListener) ManifestDeleted(repo string, sm *manifest.SignedManifest) error { + tl.ops["manifest:delete"]++ + return nil +} + +func (tl *testListener) BlobPushed(repo string, desc distribution.Descriptor) error { + tl.ops["layer:push"]++ + return nil +} + +func (tl *testListener) BlobPulled(repo string, desc distribution.Descriptor) error { + tl.ops["layer:pull"]++ + return nil +} + +func (tl *testListener) BlobDeleted(repo string, desc distribution.Descriptor) error { + tl.ops["layer:delete"]++ + return nil +} + +// checkExerciseRepository takes the repository through all of its operations, +// carrying out generic checks. +func checkExerciseRepository(t *testing.T, repository distribution.Repository) { + // TODO(stevvooe): This would be a nice testutil function. Basically, it + // takes the registry through a common set of operations. This could be + // used to make cross-cutting updates by changing internals that affect + // update counts. Basically, it would make writing tests a lot easier.
+ ctx := context.Background() + tag := "thetag" + m := manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: repository.Name(), + Tag: tag, + } + + blobs := repository.Blobs(ctx) + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating test layer: %v", err) + } + dgst := digest.Digest(ds) + + wr, err := blobs.Create(ctx) + if err != nil { + t.Fatalf("error creating layer upload: %v", err) + } + + // Use the resumes, as well! + wr, err = blobs.Resume(ctx, wr.ID()) + if err != nil { + t.Fatalf("error resuming layer upload: %v", err) + } + + io.Copy(wr, rs) + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + + m.FSLayers = append(m.FSLayers, manifest.FSLayer{ + BlobSum: dgst, + }) + + // Then fetch the blobs + if rc, err := blobs.Open(ctx, dgst); err != nil { + t.Fatalf("error fetching layer: %v", err) + } else { + defer rc.Close() + } + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating key: %v", err) + } + + sm, err := manifest.Sign(&m, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + manifests, err := repository.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + if err = manifests.Put(sm); err != nil { + t.Fatalf("unexpected error putting the manifest: %v", err) + } + + p, err := sm.Payload() + if err != nil { + t.Fatalf("unexpected error getting manifest payload: %v", err) + } + + dgst, err := digest.FromBytes(p) + if err != nil { + t.Fatalf("unexpected error digesting manifest payload: %v", err) + } + + fetchedByManifest, err := manifests.Get(dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + if fetchedByManifest.Tag != sm.Tag { + t.Fatalf("retrieved unexpected manifest: %v", err) + } + + fetched, err := manifests.GetByTag(tag) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + if fetched.Tag != fetchedByManifest.Tag { + t.Fatalf("retrieved unexpected manifest: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go new file mode 100644 index 00000000..2a8ffcbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go @@ -0,0 +1,152 @@ +package notifications + +import ( + "expvar" + "fmt" + "net/http" + "sync" +) + +// EndpointMetrics track various actions taken by the endpoint, typically by +// number of events. The goal of this is to export it via expvar, but we may +// find some other future solution to be better. +type EndpointMetrics struct { + Pending int // events pending in queue + Events int // total events incoming + Successes int // total events written successfully + Failures int // total events failed + Errors int // total events errored + Statuses map[string]int // status code histogram, per call event +} + +// safeMetrics guards the metrics implementation with a lock and provides a +// safe update function. +type safeMetrics struct { + EndpointMetrics + sync.Mutex // protects statuses map +} + +// newSafeMetrics returns safeMetrics with map allocated.
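Because the stated goal is expvar export, these counters become visible wherever the expvar handler is mounted. A rough sketch of inspecting them, assuming the default mux and a hypothetical local address (io, os, net/http, and logrus imports assumed):

// expvar registers its handler at /debug/vars on http.DefaultServeMux.
go http.ListenAndServe("localhost:8080", nil)

resp, err := http.Get("http://localhost:8080/debug/vars")
if err != nil {
	logrus.Fatalf("error fetching expvar: %v", err)
}
defer resp.Body.Close()
// The JSON body includes the "registry" map, whose "notifications" entry
// is populated by the init function later in this file.
io.Copy(os.Stdout, resp.Body)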
+func newSafeMetrics() *safeMetrics { + var sm safeMetrics + sm.Statuses = make(map[string]int) + return &sm +} + +// httpStatusListener returns the listener for the http sink that updates the +// relevant counters. +func (sm *safeMetrics) httpStatusListener() httpStatusListener { + return &endpointMetricsHTTPStatusListener{ + safeMetrics: sm, + } +} + +// eventQueueListener returns a listener that maintains queue related counters. +func (sm *safeMetrics) eventQueueListener() eventQueueListener { + return &endpointMetricsEventQueueListener{ + safeMetrics: sm, + } +} + +// endpointMetricsHTTPStatusListener increments counters related to http sinks +// for the relevant events. +type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queue's pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is a global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Setup registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now.
+ + registry := expvar.Get("registry") + + if registry == nil { + registry = expvar.NewMap("registry") + } + + var notifications expvar.Map + notifications.Init() + notifications.Set("endpoints", expvar.Func(func() interface{} { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + var names []interface{} + for _, v := range endpoints.registered { + var epjson struct { + Name string `json:"name"` + URL string `json:"url"` + EndpointConfig + + Metrics EndpointMetrics + } + + epjson.Name = v.Name() + epjson.URL = v.URL() + epjson.EndpointConfig = v.EndpointConfig + + v.ReadMetrics(&epjson.Metrics) + + names = append(names, epjson) + } + + return names + })) + + registry.(*expvar.Map).Set("notifications", &notifications) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go new file mode 100644 index 00000000..dda4a565 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go @@ -0,0 +1,337 @@ +package notifications + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// NOTE(stevvooe): This file contains definitions for several utility sinks. +// Typically, the broadcaster is the only sink that should be required +// externally, but others are suitable for export if the need arises. Albeit, +// the tight integration with endpoint metrics should be removed. + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan []Event + closed chan chan struct{} +} + +// NewBroadcaster appends one or more sinks to the list of sinks. The +// broadcaster behavior will be affected by the properties of the sink. +// Generally, the sink should accept all messages and deal with reliability on +// its own. EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan []Event), + closed: make(chan chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts a block of events to be dispatched to all sinks. This method +// will never fail and should never block (hopefully!). The caller cedes the +// slice memory to the broadcaster and should not modify it after calling +// write. +func (b *Broadcaster) Write(events ...Event) error { + select { + case b.events <- events: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + logrus.Infof("broadcaster: closing") + select { + case <-b.closed: + // already closed + return fmt.Errorf("broadcaster: already closed") + default: + // do a little chan handoff dance to synchronize closing + closed := make(chan struct{}) + b.closed <- closed + close(b.closed) + <-closed + return nil + } +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit.
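Typical wiring of the broadcaster, sketched with placeholder sinks (sinkA and sinkB stand in for any Sink implementations; they are not part of this patch):

b := NewBroadcaster(sinkA, sinkB)

var ev Event
ev.ID = "example-event"
ev.Action = EventActionPush

// Write hands the block to the run loop; each sink handles its own reliability.
if err := b.Write(ev); err != nil {
	logrus.Errorf("broadcast write failed: %v", err) // only ErrSinkClosed expected
}

// Close flushes pending events and closes the underlying sinks.
if err := b.Close(); err != nil {
	logrus.Errorf("broadcast close failed: %v", err)
}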
+func (b *Broadcaster) run() { + for { + select { + case block := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(block...); err != nil { + logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) + } + } + case closing := <-b.closed: + + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil { + logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) + } + } + closing <- struct{}{} + + logrus.Debugf("broadcaster: closed") + return + } + } +} + +// eventQueue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type eventQueue struct { + sink Sink + events *list.List + listeners []eventQueueListener + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// eventQueueListener is called when various events happen on the queue. +type eventQueueListener interface { + ingress(events ...Event) + egress(events ...Event) +} + +// newEventQueue returns a queue to the provided sink. If listeners are +// provided, they will be called to update pending metrics on ingress and +// egress. +func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { + eq := eventQueue{ + sink: sink, + events: list.New(), + listeners: listeners, + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *eventQueue) Write(events ...Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + for _, listener := range eq.listeners { + listener.ingress(events...) + } + eq.events.PushBack(events) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shuts down the event queue, flushing any pending events to the sink +// before closing it. +func (eq *eventQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return fmt.Errorf("eventqueue: already closed") + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + + return eq.sink.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *eventQueue) run() { + for { + block := eq.next() + + if block == nil { + return // nil block means event queue is closed. + } + + if err := eq.sink.Write(block...); err != nil { + logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) + } + + for _, listener := range eq.listeners { + listener.egress(block...) + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *eventQueue) next() []Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.([]Event) + eq.events.Remove(front) + + return block +} + +// retryingSink retries the write until success or an ErrSinkClosed is +// returned. The underlying sink must succeed with probability p > 0 or the +// sink will block. Internally, it uses a circuit breaker to manage retries +// and resets. Concurrent calls to a retrying sink are serialized through the +// sink, meaning that if one is in-flight, another will not proceed.
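The comments above imply a standard composition: an http sink wrapped in a retrying sink, fronted by a queue, all fed by the broadcaster. A hedged sketch using the constructors in this package, with illustrative parameter values rather than the registry's real configuration:

// innermost: single-flight http delivery
hs := newHTTPSink("http://example.com/webhook", time.Second, nil)

// wrap with retries: back off five seconds once three consecutive failures accumulate
rs := newRetryingSink(hs, 3, 5*time.Second)

// queue in front so producers never block on slow delivery
eq := newEventQueue(rs)

b := NewBroadcaster(eq)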
+type retryingSink struct { + mu sync.Mutex + sink Sink + closed bool + + // circuit breaker heuristics + failures struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + } +} + +type retryingSinkListener interface { + active(events ...Event) + retry(events ...Event) +} + +// TODO(stevvooe): We are using circuit break here, which actually doesn't +// make a whole lot of sense for this use case, since we always retry. Move +// this to use bounded exponential backoff. + +// newRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { + rs := &retryingSink{ + sink: sink, + } + rs.failures.threshold = threshold + rs.failures.backoff = backoff + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *retryingSink) Write(events ...Event) error { + rs.mu.Lock() + defer rs.mu.Unlock() + +retry: + + if rs.closed { + return ErrSinkClosed + } + + if !rs.proceed() { + logrus.Warnf("%v encountered too many errors, backing off", rs.sink) + rs.wait(rs.failures.backoff) + goto retry + } + + if err := rs.write(events...); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logrus.Errorf("retryingsink: error writing events: %v, retrying", err) + goto retry + } + + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. +func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a successful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// heuristics. 
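One possible shape for the bounded exponential backoff suggested by the TODO above, illustrative only and not part of this patch:

// backoffFor doubles the base delay per attempt, capping at max.
func backoffFor(attempt int, base, max time.Duration) time.Duration {
	d := base << uint(attempt) // base, 2*base, 4*base, ...
	if d <= 0 || d > max {     // d <= 0 guards against shift overflow
		return max
	}
	return d
}

The retry loop in Write could then call rs.wait(backoffFor(rs.failures.recent, time.Second, time.Minute)) in place of the fixed rs.failures.backoff.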
+func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks_test.go new file mode 100644 index 00000000..89756a99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks_test.go @@ -0,0 +1,223 @@ +package notifications + +import ( + "fmt" + "math/rand" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "testing" +) + +func TestBroadcaster(t *testing.T) { + const nEvents = 1000 + var sinks []Sink + + for i := 0; i < 10; i++ { + sinks = append(sinks, &testSink{}) + } + + b := NewBroadcaster(sinks...) + + var block []Event + var wg sync.WaitGroup + for i := 1; i <= nEvents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := b.Write(block...); err != nil { + t.Fatalf("error writing block of length %d: %v", len(block), err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() // Wait until writes complete + checkClose(t, b) + + // Iterate through the sinks and check that they all have the expected length. + for _, sink := range sinks { + ts := sink.(*testSink) + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != nEvents { + t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + } + +} + +func TestEventQueue(t *testing.T) { + const nevents = 1000 + var ts testSink + metrics := newSafeMetrics() + eq := newEventQueue( + // delayed sink simulates destination slower than channel comms + &delayedSink{ + Sink: &ts, + delay: time.Millisecond * 1, + }, metrics.eventQueueListener()) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= nevents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := eq.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, eq) + + ts.mu.Lock() + defer ts.mu.Unlock() + metrics.Lock() + defer metrics.Unlock() + + if len(ts.events) != nevents { + t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + + if metrics.Events != nevents { + t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) + } + + if metrics.Pending != 0 { + t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) + } +} + +func TestRetryingSink(t *testing.T) { + + // Make a sink that fails most of the time, ensuring that all the events + // make it through. + var ts testSink + flaky := &flakySink{ + rate: 1.0, // start out always failing.
+ Sink: &ts, + } + s := newRetryingSink(flaky, 3, 10*time.Millisecond) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= 100; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + // Above 50, set the failure rate lower + if i > 50 { + s.mu.Lock() + flaky.rate = 0.90 + s.mu.Unlock() + } + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + defer wg.Done() + if err := s.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, s) + + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != 100 { + t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) + } +} + +type testSink struct { + events []Event + mu sync.Mutex + closed bool +} + +func (ts *testSink) Write(events ...Event) error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.events = append(ts.events, events...) + return nil +} + +func (ts *testSink) Close() error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.closed = true + + logrus.Infof("closing testSink") + return nil +} + +type delayedSink struct { + Sink + delay time.Duration +} + +func (ds *delayedSink) Write(events ...Event) error { + time.Sleep(ds.delay) + return ds.Sink.Write(events...) +} + +type flakySink struct { + Sink + rate float64 +} + +func (fs *flakySink) Write(events ...Event) error { + if rand.Float64() < fs.rate { + return fmt.Errorf("error writing %d events", len(events)) + } + + return fs.Sink.Write(events...) +} + +func checkClose(t *testing.T, sink Sink) { + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing: %v", err) + } + + // second close should not crash but should return an error. + if err := sink.Close(); err == nil { + t.Fatalf("no error on double close") + } + + // Write after closed should be an error + if err := sink.Write([]Event{}...); err == nil { + t.Fatalf("write after closed did not have an error") + } else if err != ErrSinkClosed { + t.Fatalf("error should be ErrSinkClosed") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile new file mode 100644 index 00000000..1e2a8471 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:14.04 + +ENV GOLANG_VERSION 1.4rc1 +ENV GOPATH /var/cache/drone +ENV GOROOT /usr/local/go +ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin + +ENV LANG C +ENV LC_ALL C + +RUN apt-get update && apt-get install -y \ + wget ca-certificates git mercurial bzr \ + --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* + +RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ + tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ + rm go${GOLANG_VERSION}.linux-amd64.tar.gz + +RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md new file mode 100644 index 00000000..eda88696 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md @@ -0,0 +1,6 @@ +Git Hooks +========= + +To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. 
+ +As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh new file mode 100644 index 00000000..6afea8a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +cd $(dirname $0) + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +set -e +set -x + +# Just in case the directory doesn't exist +mkdir -p $REPO_ROOT/.git/hooks + +ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit new file mode 100644 index 00000000..3ee2e913 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit @@ -0,0 +1,29 @@ +#!/bin/sh + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +cd $REPO_ROOT + +GOFMT_ERRORS=$(gofmt -s -l . 2>&1) +if [ -n "$GOFMT_ERRORS" ]; then + printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr + exit 1 +fi + +GOLINT_ERRORS=$(golint ./... 2>&1) +if [ -n "$GOLINT_ERRORS" ]; then + printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr + exit 1 +fi + +GOVET_ERRORS=$(go vet ./... 2>&1) +GOVET_STATUS=$? +if [ "$GOVET_STATUS" -ne "0" ]; then + printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr + exit $GOVET_STATUS +fi diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry.go new file mode 100644 index 00000000..1a3de01d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry.go @@ -0,0 +1,120 @@ +package distribution + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. 
+// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name string) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// ManifestServiceOption is a function argument for ManifestService methods +type ManifestServiceOption func(ManifestService) error + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Name returns the name of the repository. + Name() string + + // Manifests returns a reference to this repository's manifest service, + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Signatures returns a reference to this repository's signatures service. + Signatures() SignatureService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. + +// ManifestService provides operations on image manifests. +type ManifestService interface { + // Exists returns true if the manifest exists. + Exists(dgst digest.Digest) (bool, error) + + // Get retrieves the manifest identified by the digest, if it exists. + Get(dgst digest.Digest) (*manifest.SignedManifest, error) + + // Delete removes the manifest, if it exists. + Delete(dgst digest.Digest) error + + // Put creates or updates the manifest. + Put(manifest *manifest.SignedManifest) error + + // TODO(stevvooe): The methods after this message should be moved to a + // discrete TagService, per active proposals. + + // Tags lists the tags under the named repository. + Tags() ([]string, error) + + // ExistsByTag returns true if a manifest with the given tag exists. + ExistsByTag(tag string) (bool, error) + + // GetByTag retrieves the named manifest, if it exists. + GetByTag(tag string, options ...ManifestServiceOption) (*manifest.SignedManifest, error) + + // TODO(stevvooe): There are several changes that need to be done to this + // interface: + // + // 1. Allow explicit tagging with Tag(digest digest.Digest, tag string) + // 2. Support reading tags with a re-entrant reader to avoid large + // allocations in the registry. + // 3. Long-term: Provide All() method that lets one scroll through all of + // the manifest entries. + // 4. Long-term: break out concept of signing from manifests. This is + // really a part of the distribution sprint. + // 5.
Long-term: Manifest should be an interface. This code shouldn't + // really be concerned with the storage format. +} + +// SignatureService provides operations on signatures. +type SignatureService interface { + // Get retrieves all of the signature blobs for the specified digest. + Get(dgst digest.Digest) ([][]byte, error) + + // Put stores the signature for the provided digest. + Put(dgst digest.Digest, signatures ...[]byte) error +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 00000000..fdaddbcf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,259 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + return ec.Descriptor().Value +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returns the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithDetail creates a new Error struct based on the passed-in info and +// sets the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error.
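To see how the pieces combine: a registered code whose Message is a format string can be specialized per call with WithArgs, and annotated with WithDetail. A sketch using ErrorCodeTest3 from the tests below (registered with Message "Sorry %q isn't valid"):

// WithArgs fills the %q in the registered message; WithDetail attaches
// structured data that survives JSON round trips.
err := ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")

// Error() lowercases the code value and prepends it:
fmt.Println(err.Error()) // prints: test3: Sorry "BOOGIE" isn't valid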
+func (e Error) Error() string { + return fmt.Sprintf("%s: %s", + strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), + e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique string key, often capitalized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the error's purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the error code identified by the string value. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors.
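In handler code, the Errors slice accumulates anything error-shaped and serializes as the standard envelope; a minimal sketch:

var errs Errors
errs = append(errs, ErrorCodeUnknown)                  // bare ErrorCode
errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) // full Error

p, err := json.Marshal(errs)
if err != nil {
	panic(err) // illustrative; real handlers would log instead
}
// p: {"errors":[{"code":"UNKNOWN","message":"unknown error"}, ...]}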
+func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts a slice of error, ErrorCode or Error into a +// slice of Error, then serializes it +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was set up and they forgot to set the + // Message field (meaning it's "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Errors w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Errors w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go new file mode 100644 index 00000000..27fb1cec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go @@ -0,0 +1,179 @@ +package errcode + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" +) + +// TestErrorCodes ensures that error code format, mappings and +// marshaling/unmarshaling round trips are stable. +func TestErrorCodes(t *testing.T) { + if len(errorCodeToDescriptors) == 0 { + t.Fatal("errors aren't loaded!") + } + + for ec, desc := range errorCodeToDescriptors { + if ec != desc.Code { + t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code) + } + + if idToDescriptors[desc.Value].Code != ec { + t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec) + } + + if ec.Message() != desc.Message { + t.Fatalf("ec.Message doesn't match desc.Message: %q != %q", ec.Message(), desc.Message) + } + + // Test (de)serializing the ErrorCode + p, err := json.Marshal(ec) + if err != nil { + t.Fatalf("couldn't marshal ec %v: %v", ec, err) + } + + if len(p) <= 0 { + t.Fatalf("expected content in marshaled form for error code %v", ec) + } + + // First, unmarshal to interface and ensure we have a string.
+ var ecUnspecified interface{} + if err := json.Unmarshal(p, &ecUnspecified); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if _, ok := ecUnspecified.(string); !ok { + t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) + } + + // Now, unmarshal with the error code type and ensure they are equal + var ecUnmarshaled ErrorCode + if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if ecUnmarshaled != ec { + t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) + } + } + +} + +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + +func TestErrorsManagement(t *testing.T) { + var errs Errors + + errs = append(errs, ErrorCodeTest1) + errs = append(errs, ErrorCodeTest2.WithDetail( + map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) + + p, err := json.Marshal(errs) + + if err != nil { + t.Fatalf("error marshaling errors: %v", err) + } + + expectedJSON := `{"errors":[` + + `{"code":"TEST1","message":"test error 1"},` + + `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + + `]}` + + if string(p) != expectedJSON { + t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) + } + + // Now test the reverse + var unmarshaled Errors + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Test the arg substitution stuff + e1 := unmarshaled[3].(Error) + exp1 := `Sorry "BOOGIE" isn't valid` + if e1.Message != exp1 { + t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) + } + + exp1 = "test3: " + exp1 + if e1.Error() != exp1 { + t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) + } + + // Test again with a single value this time + errs = Errors{ErrorCodeUnknown} + expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" + p, err = json.Marshal(errs) + + if err != nil { + t.Fatalf("error marshaling errors: %v", err) + } + + if string(p) != expectedJSON { + t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + } + + // Now test the reverse + unmarshaled = nil + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + }
+ if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Verify that calling WithArgs() more than once does the right thing, + // meaning it creates a new Error and uses the ErrorCode's Message + e1 = ErrorCodeTest3.WithArgs("test1") + e2 := e1.WithArgs("test2") + if &e1 == &e2 { + t.Fatalf("args: e2 and e1 should not be the same, but they are") + } + if e2.Message != `Sorry "test2" isn't valid` { + t.Fatalf("e2 had wrong message: %q", e2.Message) + } + + // Verify that calling WithDetail() more than once does the right thing, + // meaning it creates a new Error and overwrites the old detail field + e1 = ErrorCodeTest3.WithDetail("stuff1") + e2 = e1.WithDetail("stuff2") + if &e1 == &e2 { + t.Fatalf("detail: e2 and e1 should not be the same, but they are") + } + if e2.Detail != `stuff2` { + t.Fatalf("e2 had wrong detail: %q", e2.Detail) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 00000000..49a64a86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +}
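A sketch of how a caller is expected to use ServeJSON. Nothing here beyond ServeJSON and ErrorCodeUnknown comes from the patch; the recorder and the detail payload are illustrative:

    package main

    import (
        "fmt"
        "net/http/httptest"

        "github.com/docker/distribution/registry/api/errcode"
    )

    func main() {
        // Record what ServeJSON writes for a single coded error.
        rec := httptest.NewRecorder()
        err := errcode.ErrorCodeUnknown.WithDetail("something broke")
        if encodeErr := errcode.ServeJSON(rec, err); encodeErr != nil {
            panic(encodeErr) // the response writer itself failed
        }
        // The status comes from the code's descriptor (500 for UNKNOWN);
        // the body is the {"errors":[...]} envelope defined above.
        fmt.Println(rec.Code)
        fmt.Println(rec.Body.String())
    }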
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 00000000..42f911b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,86 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +// ErrorCodeUnknown is a generic error that can be used as a last +// resort if there is no situation-specific error message that applies +var ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +}
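A sketch of how a consumer mints its own code with Register; registration is process-global and panics on duplicates, so it normally happens in package-level vars, exactly as the test file above does. The group name "myapp.errors" and the TOO_BUSY descriptor are invented for illustration:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/docker/distribution/registry/api/errcode"
    )

    // Hypothetical application error code, registered once at init time.
    var ErrTooBusy = errcode.Register("myapp.errors", errcode.ErrorDescriptor{
        Value:          "TOO_BUSY",
        Message:        "server too busy, retry later",
        Description:    `Returned when the server is shedding load.`,
        HTTPStatusCode: http.StatusServiceUnavailable,
    })

    func main() {
        // The registry is queryable; group names come back sorted.
        fmt.Println(errcode.GetGroupNames()) // [errcode myapp.errors]
        fmt.Println(ErrTooBusy.Message())    // server too busy, retry later
    }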
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 00000000..0ef64f88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1550 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: RepositoryNameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: TagNameRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. If not present, all entries will be returned.",
+ Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponse = ResponseDescriptor{ + Description: "The client does not have access to the repository.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: unauthorizedErrorsBody, + }, + } + + unauthorizedResponsePush = ResponseDescriptor{ + Description: "The client does not have access to push to the repository.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: unauthorizedErrorsBody, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` + + unauthorizedErrorsBody = `{ + "errors:" [ + { + "code": "UNAUTHORIZED", + "message": "access to the requested resource is not authorized", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readable description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +}
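Since APIDescriptor is exported precisely so that tooling can consume it, a small sketch of that intended use. The program below assumes only the exports defined in this file and prints a rough outline of the API:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/api/v2"
    )

    func main() {
        // Walk the descriptor table: one route per endpoint, one method
        // descriptor per verb.
        for _, route := range v2.APIDescriptor.RouteDescriptors {
            fmt.Printf("%s  %s\n", route.Name, route.Path)
            for _, method := range route.Methods {
                fmt.Printf("  %s: %s\n", method.Method, method.Description)
            }
        }
    }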
+ +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. It's most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particular request. + Name string + + // Description should cover the request's purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. + Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format specifies the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route.
Typically, this can be used for lightweight version checks and to validate registry authorization.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The client is not authorized to access the registry.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... 
+ ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Description: "The named manifest is not known to the registry.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + 
Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have permission to push to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +}`, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. 
Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Delete is not enabled on the registry", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. 
Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponsePush, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponsePush, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`.
The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. 
The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding to the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body.
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The upload is unknown to the registry.
The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch Complete", + Description: "Request an unabridged list of repositories available.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 00000000..cde01195 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. 
+package v2 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go new file mode 100644 index 00000000..87e27f2e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go @@ -0,0 +1,154 @@ +package v2 + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +const errGroup = "registry.api.v2" + +var ( + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + }) + + // ErrorCodeUnauthorized is returned if a request is not authorized. + ErrorCodeUnauthorized = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "access to the requested resource is not authorized", + Description: `The access controller denied access for the operation on + a resource. Often this will be accompanied by a 401 Unauthorized + response status.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + // size does not match the content length. + ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameUnknown is returned when the repository name is not known.
+ ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestUnknown is returned when the image manifest is unknown. + ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag, is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestInvalid is returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. + ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + about the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+ ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go new file mode 100644 index 00000000..14b7ea60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go @@ -0,0 +1,83 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" +) + +// TODO(stevvooe): Move these definitions to the future "reference" package. +// While they are used with v2 definitions, their relevance expands beyond. + +const ( + // RepositoryNameTotalLengthMax is the maximum total number of characters in + // a repository name + RepositoryNameTotalLengthMax = 255 +) + +// RepositoryNameComponentRegexp restricts registry path component names to +// start with at least one letter or number, with following parts able to +// be separated by one period, dash or underscore. +var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) + +// RepositoryNameComponentAnchoredRegexp is the version of +// RepositoryNameComponentRegexp which must completely match the content +var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) + +// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow +// multiple path components, separated by a forward slash. +var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String()) + +// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. +var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) + +// TagNameAnchoredRegexp matches valid tag names, anchored at the start and +// end of the matched string. +var TagNameAnchoredRegexp = regexp.MustCompile("^" + TagNameRegexp.String() + "$") + +var ( + // ErrRepositoryNameEmpty is returned for empty, invalid repository names. + ErrRepositoryNameEmpty = fmt.Errorf("repository name must have at least one component") + + // ErrRepositoryNameLong is returned when a repository name is longer than + // RepositoryNameTotalLengthMax + ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) + + // ErrRepositoryNameComponentInvalid is returned when a repository name does + // not match RepositoryNameComponentRegexp + ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) +) + +// ValidateRepositoryName ensures the repository name is valid for use in the +// registry. This function accepts a superset of what might be accepted by +// docker core or docker hub. If the name does not pass validation, an error, +// describing the conditions, is returned. +// +// Effectively, the name should comply with the following grammar: +// +// alpha-numeric := /[a-z0-9]+/ +// separator := /[._-]/ +// component := alpha-numeric [separator alpha-numeric]* +// namespace := component ['/' component]* +// +// The result of the production, known as the "namespace", should be limited +// to 255 characters. 
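+//
+// For example (an illustrative sketch; the expected results mirror the test
+// cases in names_test.go below):
+//
+//	ValidateRepositoryName("library/ubuntu")   // nil
+//	ValidateRepositoryName("")                 // ErrRepositoryNameEmpty
+//	ValidateRepositoryName("foo.com:8080/bar") // ErrRepositoryNameComponentInvalid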
+func ValidateRepositoryName(name string) error { + if name == "" { + return ErrRepositoryNameEmpty + } + + if len(name) > RepositoryNameTotalLengthMax { + return ErrRepositoryNameLong + } + + components := strings.Split(name, "/") + + for _, component := range components { + if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { + return ErrRepositoryNameComponentInvalid + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go new file mode 100644 index 00000000..656ae846 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go @@ -0,0 +1,231 @@ +package v2 + +import ( + "strconv" + "strings" + "testing" +) + +var ( + // regexpTestcases is a unified set of testcases for + // TestValidateRepositoryName and TestRepositoryNameRegexp. + // Some of them are valid inputs for one and not the other. + regexpTestcases = []struct { + // input is the repository name or name component testcase + input string + // err is the error expected from ValidateRepositoryName, or nil + err error + // invalid should be true if the testcase is *not* expected to + // match RepositoryNameRegexp + invalid bool + }{ + { + input: "", + err: ErrRepositoryNameEmpty, + }, + { + input: "short", + }, + { + input: "simple/name", + }, + { + input: "library/ubuntu", + }, + { + input: "docker/stevvooe/app", + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + }, + { + input: "aa/aa/bb/bb/bb", + }, + { + input: "a/a/a/b/b", + }, + { + input: "a/a/a/a/", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "a//a/a", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "a", + }, + { + input: "a/aa", + }, + { + input: "aa/a", + }, + { + input: "a/aa/a", + }, + { + input: "foo.com/", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + // TODO: this testcase should be valid once we switch to + // the reference package. 
+ input: "foo.com:8080/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo.com/bar", + }, + { + input: "foo.com/bar/baz", + }, + { + input: "foo.com/bar/baz/quux", + }, + { + input: "blog.foo.com/bar/baz", + }, + { + input: "asdf", + }, + { + input: "asdf$$^/aa", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "aa-a/aa", + }, + { + input: "aa/aa", + }, + { + input: "a-a/a-a", + }, + { + input: "a-/a/a/a", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: strings.Repeat("a", 255), + }, + { + input: strings.Repeat("a", 256), + err: ErrRepositoryNameLong, + }, + { + input: "-foo/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo/bar-", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo-/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo/-bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "_foo/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo/bar_", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "____/____", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "_docker/_docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "docker_/docker_", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "b.gcr.io/test.example.com/my-app", // embedded domain component + }, + // TODO(stevvooe): The following is a punycode domain name that we may + // want to allow in the future. Currently, this is not allowed but we + // may want to change this in the future. Adding this here as invalid + // for the time being. + { + input: "xn--n3h.com/myimage", // http://☃.com in punycode + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "xn--7o8h.com/myimage", // http://🐳.com in punycode + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + } +) + +// TestValidateRepositoryName tests the ValidateRepositoryName function, +// which uses RepositoryNameComponentAnchoredRegexp for validation +func TestValidateRepositoryName(t *testing.T) { + for _, testcase := range regexpTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + if err := ValidateRepositoryName(testcase.input); err != testcase.err { + if testcase.err != nil { + if err != nil { + failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) + } else { + failf("expected invalid repository: %v", testcase.err) + } + } else { + if err != nil { + // Wrong error returned. + failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) + } else { + failf("unexpected error validating repository name: %v", err) + } + } + } + } +} + +func TestRepositoryNameRegexp(t *testing.T) { + for _, testcase := range regexpTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+			t.Fail()
+		}
+
+		matches := RepositoryNameRegexp.FindString(testcase.input) == testcase.input
+		if matches == testcase.invalid {
+			if testcase.invalid {
+				failf("expected invalid repository name %s", testcase.input)
+			} else {
+				failf("expected valid repository name %s", testcase.input)
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 00000000..5b80d5be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
+)
+
+var allEndpoints = []string{
+	RouteNameManifest,
+	RouteNameCatalog,
+	RouteNameTags,
+	RouteNameBlob,
+	RouteNameBlobUpload,
+	RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go
new file mode 100644
index 00000000..b8d724df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go
@@ -0,0 +1,355 @@
+package v2
+
+import (
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/gorilla/mux"
+)
+
+type routeTestCase struct {
+	RequestURI  string
+	ExpectedURI string
+	Vars        map[string]string
+	RouteName   string
+	StatusCode  int
+}
+
+// TestRouter registers a test handler with all the routes and ensures that
+// each route returns the expected path variables. No method verification is
+// performed. This is not meant to be exhaustive, but serves as a check that
+// the expected variables are extracted.
+//
+// This may go away as the application structure comes together.
+func TestRouter(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBase, + RequestURI: "/v2/", + Vars: map[string]string{}, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/manifests/bar", + Vars: map[string]string{ + "name": "foo", + "reference": "bar", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/tag", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "tag", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "sha256:abcdef01234567890", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/tags/list", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar/baz", + }, + }, + { + RouteName: RouteNameBlob, + RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", + Vars: map[string]string{ + "name": "foo/bar", + "digest": "tarsum.dev+foo:abcdef0919234", + }, + }, + { + RouteName: RouteNameBlob, + RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", + Vars: map[string]string{ + "name": "foo/bar", + "digest": "sha256:abcdef0919234", + }, + }, + { + RouteName: RouteNameBlobUpload, + RequestURI: "/v2/foo/bar/blobs/uploads/", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/uuid", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "uuid", + }, + }, + { + // support uuid proper + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + }, + }, + { + // supports urlsafe base64 + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + }, + }, + { + // does not match + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", + StatusCode: http.StatusNotFound, + }, + { + // Check ambiguity: ensure we can distinguish between tags for + // "foo/bar/image/image" and image for "foo/bar/image" with tag + // "tags" + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/manifests/tags", + Vars: map[string]string{ + "name": "foo/bar/manifests", + "reference": "tags", + }, + }, + { + // This case presents an ambiguity between foo/bar with tag="tags" + // and list tags for "foo/bar/manifest" + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/manifests/tags/list", + Vars: map[string]string{ + "name": "foo/bar/manifests", + }, + }, + } + + checkTestRouter(t, testCases, "", true) 
+	checkTestRouter(t, testCases, "/prefix/", true)
+}
+
+func TestRouterWithPathTraversals(t *testing.T) {
+	testCases := []routeTestCase{
+		{
+			RouteName:   RouteNameBlobUploadChunk,
+			RequestURI:  "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			StatusCode:  http.StatusNotFound,
+		},
+		{
+			// Testing for path traversal attack handling
+			RouteName:   RouteNameTags,
+			RequestURI:  "/v2/foo/../bar/baz/tags/list",
+			ExpectedURI: "/v2/bar/baz/tags/list",
+			Vars: map[string]string{
+				"name": "bar/baz",
+			},
+		},
+	}
+	checkTestRouter(t, testCases, "", false)
+}
+
+func TestRouterWithBadCharacters(t *testing.T) {
+	if testing.Short() {
+		testCases := []routeTestCase{
+			{
+				RouteName:  RouteNameBlobUploadChunk,
+				RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+				StatusCode: http.StatusNotFound,
+			},
+			{
+				// Testing the handling of bad characters in the path
+				RouteName:  RouteNameTags,
+				RequestURI: "/v2/foo/不bar/tags/list",
+				StatusCode: http.StatusNotFound,
+			},
+		}
+		checkTestRouter(t, testCases, "", true)
+	} else {
+		// in the long version we're going to fuzz the router
+		// with random UTF-8 characters outside the 7-bit ASCII range.
+		// These are not valid characters for the router and we expect
+		// 404s on every test.
+		rand.Seed(time.Now().UTC().UnixNano())
+		testCases := make([]routeTestCase, 1000)
+		for idx := range testCases {
+			testCases[idx] = routeTestCase{
+				RouteName:  RouteNameTags,
+				RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)),
+				StatusCode: http.StatusNotFound,
+			}
+		}
+		checkTestRouter(t, testCases, "", true)
+	}
+}
+
+func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) {
+	router := RouterWithPrefix(prefix)
+
+	testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		testCase := routeTestCase{
+			RequestURI: r.RequestURI,
+			Vars:       mux.Vars(r),
+			RouteName:  mux.CurrentRoute(r).GetName(),
+		}
+
+		enc := json.NewEncoder(w)
+
+		if err := enc.Encode(testCase); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	})
+
+	// Startup test server
+	server := httptest.NewServer(router)
+
+	for _, testcase := range testCases {
+		testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI
+		// Register the endpoint
+		route := router.GetRoute(testcase.RouteName)
+		if route == nil {
+			t.Fatalf("route for name %q not found", testcase.RouteName)
+		}
+
+		route.Handler(testHandler)
+
+		u := server.URL + testcase.RequestURI
+
+		resp, err := http.Get(u)
+
+		if err != nil {
+			t.Fatalf("error issuing get request: %v", err)
+		}
+
+		if testcase.StatusCode == 0 {
+			// Override default, zero-value
+			testcase.StatusCode = http.StatusOK
+		}
+		if testcase.ExpectedURI == "" {
+			// Override default, zero-value
+			testcase.ExpectedURI = testcase.RequestURI
+		}
+
+		if resp.StatusCode != testcase.StatusCode {
+			t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode)
+		}
+
+		if testcase.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			// We don't care about the json response.
+			continue
+		}
+
+		dec := json.NewDecoder(resp.Body)
+
+		var actualRouteInfo routeTestCase
+		if err := dec.Decode(&actualRouteInfo); err != nil {
+			t.Fatalf("error reading json response: %v", err)
+		}
+		// Needs to be set out of band
+		actualRouteInfo.StatusCode = resp.StatusCode
+
+		if actualRouteInfo.RequestURI != testcase.ExpectedURI {
+			t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI)
+		}
+
+		if actualRouteInfo.RouteName != testcase.RouteName {
+			t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName)
+		}
+
+		// when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want
+		// that to make the comparison fail. We're otherwise done with the testcase so empty the
+		// testcase.ExpectedURI
+		testcase.ExpectedURI = ""
+		if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) {
+			t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase)
+		}

+		resp.Body.Close()
+	}
+
+}
+
+// -------------- START LICENSED CODE --------------
+// The following code is derivative of https://github.com/google/gofuzz
+// gofuzz is licensed under the Apache License, Version 2.0, January 2004,
+// a copy of which can be found in the LICENSE file at the root of this
+// repository.
+
+// These functions allow us to generate strings containing only multibyte
+// characters that are invalid in our URLs. They are used above for fuzzing
+// to ensure we always get 404s on these invalid strings
+type charRange struct {
+	first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose() rune {
+	count := int64(r.last - r.first)
+	return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+func randomString(length int) string {
+	runes := make([]rune, length)
+	for i := range runes {
+		runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose()
+	}
+	return string(runes)
+}
+
+// -------------- END LICENSED CODE --------------
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 00000000..42974394
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,234 @@
+package v2
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root   *url.URL // url root (ie http://localhost/)
+	router *mux.Router
+}
+
+// NewURLBuilder creates a URLBuilder with the provided root url object.
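+//
+// A minimal usage sketch (the root URL here is hypothetical):
+//
+//	root, _ := url.Parse("https://localhost:5000/")
+//	ub := NewURLBuilder(root)
+//	tagsURL, _ := ub.BuildTagsURL("foo/bar")
+//	// tagsURL == "https://localhost:5000/v2/foo/bar/tags/list"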
+func NewURLBuilder(root *url.URL) *URLBuilder {
+	return &URLBuilder{
+		root:   root,
+		router: Router(),
+	}
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request) *URLBuilder {
+	var scheme string
+
+	forwardedProto := r.Header.Get("X-Forwarded-Proto")
+
+	switch {
+	case len(forwardedProto) > 0:
+		scheme = forwardedProto
+	case r.TLS != nil:
+		scheme = "https"
+	case len(r.URL.Scheme) > 0:
+		scheme = r.URL.Scheme
+	default:
+		scheme = "http"
+	}
+
+	host := r.Host
+	forwardedHost := r.Header.Get("X-Forwarded-Host")
+	if len(forwardedHost) > 0 {
+		// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+		// comma-separated list of hosts, to which each proxy appends the
+		// requested host. We want to grab the first from this comma-separated
+		// list.
+		hosts := strings.SplitN(forwardedHost, ",", 2)
+		host = strings.TrimSpace(hosts[0])
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name)
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	manifestURL, err := route.URL("name", name, "reference", reference)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", name, "digest", dgst.String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
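+//
+// Optional url.Values are merged into the query string, e.g. (values taken
+// from the urls_test.go cases below):
+//
+//	ub.BuildBlobUploadURL("foo/bar", url.Values{
+//		"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+//		"size":   []string{"10000"},
+//	})
+//	// => ".../v2/foo/bar/blobs/uploads/?digest=...&size=10000"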
+func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name, "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root *url.URL
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	return cr.root.ResolveReference(routeURL), nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
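+//
+// For example (illustrative; note that Encode sorts keys alphabetically):
+//
+//	appendValues("http://example.com/?a=1", url.Values{"b": []string{"2"}})
+//	// => "http://example.com/?a=1&b=2"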
+func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go new file mode 100644 index 00000000..1113a7dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -0,0 +1,239 @@ +package v2 + +import ( + "net/http" + "net/url" + "testing" +) + +type urlBuilderTestCase struct { + description string + expectedPath string + build func() (string, error) +} + +func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + return []urlBuilderTestCase{ + { + description: "test base url", + expectedPath: "/v2/", + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expectedPath: "/v2/foo/bar/tags/list", + build: func() (string, error) { + return urlBuilder.BuildTagsURL("foo/bar") + }, + }, + { + description: "test manifest url", + expectedPath: "/v2/foo/bar/manifests/tag", + build: func() (string, error) { + return urlBuilder.BuildManifestURL("foo/bar", "tag") + }, + }, + { + description: "build blob url", + expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + build: func() (string, error) { + return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + }, + }, + { + description: "build blob upload url", + expectedPath: "/v2/foo/bar/blobs/uploads/", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar") + }, + }, + { + description: "build blob upload url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + } +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. 
+func TestURLBuilder(t *testing.T) { + roots := []string{ + "http://example.com", + "https://example.com", + "http://localhost:5000", + "https://localhost:5443", + } + + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := root + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +func TestURLBuilderWithPrefix(t *testing.T) { + roots := []string{ + "http://example.com/prefix/", + "https://example.com/prefix/", + "http://localhost:5000/prefix/", + "https://localhost:5443/prefix/", + } + + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := root[0:len(root)-1] + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +type builderFromRequestTestCase struct { + request *http.Request + base string +} + +func TestBuilderFromRequest(t *testing.T) { + u, err := url.Parse("http://example.com") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + forwardedHostHeader1 := make(http.Header, 1) + forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com") + + forwardedHostHeader2 := make(http.Header, 1) + forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") + + testRequests := []struct { + request *http.Request + base string + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1}, + base: "http://first.example.com", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, + base: "http://first.example.com", + }, + } + + for _, tr := range testRequests { + builder := NewURLBuilderFromRequest(tr.request) + + for _, testCase := range makeURLBuilderTestCases(builder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := tr.base + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +func TestBuilderFromRequestWithPrefix(t *testing.T) { + u, err := url.Parse("http://example.com/prefix/v2/") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com/prefix/", + }, + } + + for _, tr := range testRequests { + builder 
:= NewURLBuilderFromRequest(tr.request)
+
+		for _, testCase := range makeURLBuilderTestCases(builder) {
+			url, err := testCase.build()
+			if err != nil {
+				t.Fatalf("%s: error building url: %v", testCase.description, err)
+			}
+
+			expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath
+
+			if url != expectedURL {
+				t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go
new file mode 100644
index 00000000..862c8d28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go
@@ -0,0 +1,142 @@
+// Package auth defines a standard interface for request access controllers.
+//
+// An access controller has a simple interface with a single `Authorized`
+// method which checks that a given request is authorized to perform one or
+// more actions on one or more resources. This method should return a non-nil
+// error if the request is not authorized.
+//
+// An implementation registers its access controller by name with a constructor
+// which accepts an options map for configuring the access controller.
+//
+//	options := map[string]interface{}{"sillySecret": "whysosilly?"}
+//	accessController, _ := auth.GetAccessController("silly", options)
+//
+// This `accessController` can then be used in a request handler like so:
+//
+//	func updateOrder(w http.ResponseWriter, r *http.Request) {
+//		orderNumber := r.FormValue("orderNumber")
+//		resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
+//		access := auth.Access{Resource: resource, Action: "update"}
+//
+//		if ctx, err := accessController.Authorized(ctx, access); err != nil {
+//			if challenge, ok := err.(auth.Challenge); ok {
+//				// Let the challenge set the WWW-Authenticate headers.
+//				challenge.SetHeaders(w)
+//			} else {
+//				// Some other error.
+//			}
+//		}
+//	}
+//
+package auth
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/context"
+)
+
+// UserInfo carries information about
+// an authenticated/authorized client.
+type UserInfo struct {
+	Name string
+}
+
+// Resource describes a resource by type and name.
+type Resource struct {
+	Type string
+	Name string
+}
+
+// Access describes a specific action that is
+// requested or allowed for a given resource.
+type Access struct {
+	Resource
+	Action string
+}
+
+// Challenge is a special error type which is used for HTTP 401 Unauthorized
+// responses and is able to write the response with WWW-Authenticate challenge
+// header values based on the error.
+type Challenge interface {
+	error
+
+	// SetHeaders prepares the request to conduct a challenge response by
+	// adding an HTTP challenge header on the response message. Callers
+	// are expected to set the appropriate HTTP status code (e.g. 401)
+	// themselves.
+	SetHeaders(w http.ResponseWriter)
+}
+
+// AccessController controls access to registry resources based on a request
+// and required access levels for a request. Implementations can support both
+// complete denial and http authorization challenges.
+type AccessController interface {
+	// Authorized returns a new authorized context if access is granted, and
+	// a non-nil error otherwise. If one or more Access structs are
+	// provided, the requested access will be compared with what is available
+	// to the context. The given context will contain a "http.request" key with
+	// a `*http.Request` value. 
If the error is non-nil, access should always
+	// be denied. The error may be of type Challenge, in which case the caller
+	// may have the Challenge handle the request or choose what action to take
+	// based on the Challenge header or response status. The returned context
+	// object should have an "auth.user" value set to a UserInfo struct.
+	Authorized(ctx context.Context, access ...Access) (context.Context, error)
+}
+
+// WithUser returns a context with the authorized user info.
+func WithUser(ctx context.Context, user UserInfo) context.Context {
+	return userInfoContext{
+		Context: ctx,
+		user:    user,
+	}
+}
+
+type userInfoContext struct {
+	context.Context
+	user UserInfo
+}
+
+func (uic userInfoContext) Value(key interface{}) interface{} {
+	switch key {
+	case "auth.user":
+		return uic.user
+	case "auth.user.name":
+		return uic.user.Name
+	}
+
+	return uic.Context.Value(key)
+}
+
+// InitFunc is the type of an AccessController factory function and is used
+// to register the constructor for different AccessController backends.
+type InitFunc func(options map[string]interface{}) (AccessController, error)
+
+var accessControllers map[string]InitFunc
+
+func init() {
+	accessControllers = make(map[string]InitFunc)
+}
+
+// Register is used to register an InitFunc for
+// an AccessController backend with the given name.
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := accessControllers[name]; exists {
+		return fmt.Errorf("name already registered: %s", name)
+	}
+
+	accessControllers[name] = initFunc
+
+	return nil
+}
+
+// GetAccessController constructs an AccessController
+// with the given options using the named backend.
+func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
+	if initFunc, exists := accessControllers[name]; exists {
+		return initFunc(options)
+	}
+
+	return nil, fmt.Errorf("no access controller registered with name: %s", name)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go
new file mode 100644
index 00000000..5ac3d84a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go
@@ -0,0 +1,102 @@
+// Package htpasswd provides a simple authentication scheme that checks for the
+// user credential hash in an htpasswd formatted file in a configuration-determined
+// location.
+//
+// This authentication method MUST be used under TLS, as a simple token-replay attack is possible.
+package htpasswd
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+var (
+	// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
+	ErrInvalidCredential = errors.New("invalid authorization credential")
+
+	// ErrAuthenticationFailure is returned when an authentication failure should be presented to the agent.
+	ErrAuthenticationFailure = errors.New("authentication failure")
+)
+
+type accessController struct {
+	realm    string
+	htpasswd *htpasswd
+}
+
+var _ auth.AccessController = &accessController{}
+
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
+	realm, present := options["realm"]
+	if _, ok := realm.(string); !present || !ok {
+		return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`)
+	}
+
+	path, present := options["path"]
+	if _, ok := path.(string); !present || !ok {
+		return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`)
+	}
+
+	f, err := os.Open(path.(string))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	h, err := newHTPasswd(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return &accessController{realm: realm.(string), htpasswd: h}, nil
+}
+
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
+	req, err := context.GetRequest(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	username, password, ok := req.BasicAuth()
+	if !ok {
+		return nil, &challenge{
+			realm: ac.realm,
+			err:   ErrInvalidCredential,
+		}
+	}
+
+	if err := ac.htpasswd.authenticateUser(username, password); err != nil {
+		context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err)
+		return nil, &challenge{
+			realm: ac.realm,
+			err:   ErrAuthenticationFailure,
+		}
+	}
+
+	return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil
+}
+
+// challenge implements the auth.Challenge interface.
+type challenge struct {
+	realm string
+	err   error
+}
+
+var _ auth.Challenge = challenge{}
+
+// SetHeaders sets the basic challenge header on the response.
+func (ch challenge) SetHeaders(w http.ResponseWriter) {
+	w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm))
+}
+
+func (ch challenge) Error() string {
+	return fmt.Sprintf("basic authentication challenge: %#v", ch)
+}
+
+func init() {
+	auth.Register("htpasswd", auth.InitFunc(newAccessController))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go
new file mode 100644
index 00000000..db040547
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go
@@ -0,0 +1,122 @@
+package htpasswd
+
+import (
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+func TestBasicAccessController(t *testing.T) {
+	testRealm := "The-Shire"
+	testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"}
+	testPasswords := []string{"baggins", "baggins", "새주", "공주님"}
+	testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
+		frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
+		MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
+		DeokMan:공주님`
+
+	tempFile, err := ioutil.TempFile("", "htpasswd-test")
+	if err != nil {
+		t.Fatal("could not create temporary htpasswd file")
+	}
+	if _, err = tempFile.WriteString(testHtpasswdContent); err != nil {
+		t.Fatal("could not write temporary htpasswd file")
+	}
+
+	options := map[string]interface{}{
+		"realm": testRealm,
+		"path":  tempFile.Name(),
+	}
+	ctx := context.Background()
+
+	accessController, err := newAccessController(options)
+	if err != nil {
+		t.Fatal("error creating access controller")
+	}
+
+	
tempFile.Close()
+
+	var userNumber = 0
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := context.WithRequest(ctx, r)
+		authCtx, err := accessController.Authorized(ctx)
+		if err != nil {
+			switch err := err.(type) {
+			case auth.Challenge:
+				err.SetHeaders(w)
+				w.WriteHeader(http.StatusUnauthorized)
+				return
+			default:
+				t.Fatalf("unexpected error authorizing request: %v", err)
+			}
+		}
+
+		userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo)
+		if !ok {
+			t.Fatal("basic accessController did not set auth.user context")
+		}
+
+		if userInfo.Name != testUsers[userNumber] {
+			t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name)
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+	}))
+
+	client := &http.Client{
+		CheckRedirect: nil,
+	}
+
+	req, _ := http.NewRequest("GET", server.URL, nil)
+	resp, err := client.Do(req)
+
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should not be authorized
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	nonbcrypt := map[string]struct{}{
+		"bilbo":   {},
+		"DeokMan": {},
+	}
+
+	for i := 0; i < len(testUsers); i++ {
+		userNumber = i
+		req, err := http.NewRequest("GET", server.URL, nil)
+		if err != nil {
+			t.Fatalf("error allocating new request: %v", err)
+		}
+
+		req.SetBasicAuth(testUsers[i], testPasswords[i])
+
+		resp, err = client.Do(req)
+		if err != nil {
+			t.Fatalf("unexpected error during GET: %v", err)
+		}
+		defer resp.Body.Close()
+
+		if _, ok := nonbcrypt[testUsers[i]]; ok {
+			// these are not bcrypt entries and are not allowed.
+			// Request should not be authorized
+			if resp.StatusCode != http.StatusUnauthorized {
+				t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i])
+			}
+		} else {
+			// Request should be authorized
+			if resp.StatusCode != http.StatusNoContent {
+				t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i])
+			}
+		}
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go
new file mode 100644
index 00000000..494ad0a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go
@@ -0,0 +1,80 @@
+package htpasswd
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"golang.org/x/crypto/bcrypt"
+)
+
+// htpasswd holds a path to a system .htpasswd file and the machinery to parse
+// it. Only bcrypt hash entries are supported.
+type htpasswd struct {
+	entries map[string][]byte // maps username to password byte slice.
+}
+
+// newHTPasswd parses the reader and returns an htpasswd or an error.
+func newHTPasswd(rd io.Reader) (*htpasswd, error) {
+	entries, err := parseHTPasswd(rd)
+	if err != nil {
+		return nil, err
+	}
+
+	return &htpasswd{entries: entries}, nil
+}
+
+// authenticateUser checks a given user:password credential against the
+// receiving htpasswd's entries. If the check passes, nil is returned.
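+//
+// Only bcrypt entries can authenticate successfully; a sketch (hash
+// abbreviated, values mirror access_test.go):
+//
+//	ht, _ := newHTPasswd(strings.NewReader("frodo:$2y$05$926C3y10Quzn..."))
+//	err := ht.authenticateUser("frodo", "baggins") // nil when the hash matches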
+func (htpasswd *htpasswd) authenticateUser(username string, password string) error {
+	credentials, ok := htpasswd.entries[username]
+	if !ok {
+		// timing attack paranoia
+		bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
+
+		return ErrAuthenticationFailure
+	}
+
+	err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
+	if err != nil {
+		return ErrAuthenticationFailure
+	}
+
+	return nil
+}
+
+// parseHTPasswd parses the contents of htpasswd. This will read all the
+// entries in the file, whether or not they are needed. An error is returned
+// if any syntax errors are encountered or if the reader fails.
+func parseHTPasswd(rd io.Reader) (map[string][]byte, error) {
+	entries := map[string][]byte{}
+	scanner := bufio.NewScanner(rd)
+	var line int
+	for scanner.Scan() {
+		line++ // 1-based line numbering
+		t := strings.TrimSpace(scanner.Text())
+
+		if len(t) < 1 {
+			continue
+		}
+
+		// lines that *begin* with a '#' are considered comments
+		if t[0] == '#' {
+			continue
+		}
+
+		i := strings.Index(t, ":")
+		if i < 0 || i >= len(t) {
+			return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text())
+		}
+
+		entries[t[:i]] = []byte(t[i+1:])
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return entries, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go
new file mode 100644
index 00000000..309c359a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go
@@ -0,0 +1,85 @@
+package htpasswd
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestParseHTPasswd(t *testing.T) {
+
+	for _, tc := range []struct {
+		desc    string
+		input   string
+		err     error
+		entries map[string][]byte
+	}{
+		{
+			desc: "basic example",
+			input: `
+# This is a comment in a basic example.
+bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
+frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
+MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
+DeokMan:공주님
+`,
+			entries: map[string][]byte{
+				"bilbo":   []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="),
+				"frodo":   []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"),
+				"MiShil":  []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"),
+				"DeokMan": []byte("공주님"),
+			},
+		},
+		{
+			desc: "ensures comments are filtered",
+			input: `
+# asdf:asdf
+`,
+		},
+		{
+			desc: "ensure midline hash is not comment",
+			input: `
+asdf:as#df
+`,
+			entries: map[string][]byte{
+				"asdf": []byte("as#df"),
+			},
+		},
+		{
+			desc: "ensure invalid entries are caught",
+			input: `
+# A valid comment
+valid:entry
+asdf
+`,
+			err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`),
+		},
+	} {
+
+		entries, err := parseHTPasswd(strings.NewReader(tc.input))
+		if err != tc.err {
+			if tc.err == nil {
+				t.Fatalf("%s: unexpected error: %v", tc.desc, err)
+			} else {
+				if err.Error() != tc.err.Error() { // use string equality here.
+					t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err)
+				}
+			}
+		}
+
+		if tc.err != nil {
+			continue // don't test output
+		}
+
+		// allow empty and nil to be equal
+		if tc.entries == nil {
+			tc.entries = map[string][]byte{}
+		}
+
+		if !reflect.DeepEqual(entries, tc.entries) {
+			t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
+		}
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go
new file mode 100644
index 00000000..2b801d94
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go
@@ -0,0 +1,97 @@
+// Package silly provides a simple authentication scheme that checks for the
+// existence of an Authorization header and issues access if it is present
+// and non-empty.
+//
+// This package is present as an example implementation of a minimal
+// auth.AccessController and for testing. This is not suitable for any kind of
+// production security.
+package silly
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+// accessController provides a simple implementation of auth.AccessController
+// that simply checks for a non-empty Authorization header. It is useful for
+// demonstration and testing.
+type accessController struct {
+	realm   string
+	service string
+}
+
+var _ auth.AccessController = &accessController{}
+
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
+	realm, present := options["realm"]
+	if _, ok := realm.(string); !present || !ok {
+		return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
+	}
+
+	service, present := options["service"]
+	if _, ok := service.(string); !present || !ok {
+		return nil, fmt.Errorf(`"service" must be set for silly access controller`)
+	}
+
+	return &accessController{realm: realm.(string), service: service.(string)}, nil
+}
+
+// Authorized simply checks for the existence of the authorization header,
+// responding with a bearer challenge if it doesn't exist.
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
+	req, err := context.GetRequest(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if req.Header.Get("Authorization") == "" {
+		challenge := challenge{
+			realm:   ac.realm,
+			service: ac.service,
+		}
+
+		if len(accessRecords) > 0 {
+			var scopes []string
+			for _, access := range accessRecords {
+				scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action))
+			}
+			challenge.scope = strings.Join(scopes, " ")
+		}
+
+		return nil, &challenge
+	}
+
+	return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil
+}
+
+type challenge struct {
+	realm   string
+	service string
+	scope   string
+}
+
+var _ auth.Challenge = challenge{}
+
+// SetHeaders sets a simple bearer challenge on the response.
+func (ch challenge) SetHeaders(w http.ResponseWriter) {
+	header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)
+
+	if ch.scope != "" {
+		header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
+	}
+
+	w.Header().Set("WWW-Authenticate", header)
+}
+
+func (ch challenge) Error() string {
+	return fmt.Sprintf("silly authentication challenge: %#v", ch)
+}
+
+// init registers the silly auth backend.
+func init() {
+	auth.Register("silly", auth.InitFunc(newAccessController))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go
new file mode 100644
index 00000000..8b5ecb80
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go
@@ -0,0 +1,71 @@
+package silly
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution/registry/auth"
+	"golang.org/x/net/context"
+)
+
+func TestSillyAccessController(t *testing.T) {
+	ac := &accessController{
+		realm:   "test-realm",
+		service: "test-service",
+	}
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := context.WithValue(nil, "http.request", r)
+		authCtx, err := ac.Authorized(ctx)
+		if err != nil {
+			switch err := err.(type) {
+			case auth.Challenge:
+				err.SetHeaders(w)
+				w.WriteHeader(http.StatusUnauthorized)
+				return
+			default:
+				t.Fatalf("unexpected error authorizing request: %v", err)
+			}
+		}
+
+		userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo)
+		if !ok {
+			t.Fatal("silly accessController did not set auth.user context")
+		}
+
+		if userInfo.Name != "silly" {
+			t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name)
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+	}))
+
+	resp, err := http.Get(server.URL)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should not be authorized
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	req, err := http.NewRequest("GET", server.URL, nil)
+	if err != nil {
+		t.Fatalf("unexpected error creating new request: %v", err)
+	}
+	req.Header.Set("Authorization", "seriously, anything")
+
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should now be authorized
+	if resp.StatusCode != http.StatusNoContent {
+		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
new file mode 100644
index 00000000..5b1ff7ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
@@ -0,0 +1,268 @@
+package token
+
+import (
+	"crypto"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+	"github.com/docker/libtrust"
+)
+
+// accessSet maps a typed, named resource to
+// a set of actions requested or authorized.
+type accessSet map[auth.Resource]actionSet
+
+// newAccessSet constructs an accessSet from
+// a variable number of auth.Access items.
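+//
+// For example (illustrative), two actions on the same repository collapse
+// into a single resource entry:
+//
+//	set := newAccessSet(
+//		auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "pull"},
+//		auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "push"},
+//	)
+//	// set.scopeParam() == "repository:foo/bar:pull,push" (action order may vary)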
+func newAccessSet(accessItems ...auth.Access) accessSet {
+	accessSet := make(accessSet, len(accessItems))
+
+	for _, access := range accessItems {
+		resource := auth.Resource{
+			Type: access.Type,
+			Name: access.Name,
+		}
+
+		set, exists := accessSet[resource]
+		if !exists {
+			set = newActionSet()
+			accessSet[resource] = set
+		}
+
+		set.add(access.Action)
+	}
+
+	return accessSet
+}
+
+// contains returns whether or not the given access is in this accessSet.
+func (s accessSet) contains(access auth.Access) bool {
+	actionSet, ok := s[access.Resource]
+	if ok {
+		return actionSet.contains(access.Action)
+	}
+
+	return false
+}
+
+// scopeParam returns a collection of scopes which can
+// be used for a WWW-Authenticate challenge parameter.
+// See https://tools.ietf.org/html/rfc6750#section-3
+func (s accessSet) scopeParam() string {
+	scopes := make([]string, 0, len(s))
+
+	for resource, actionSet := range s {
+		actions := strings.Join(actionSet.keys(), ",")
+		scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions))
+	}
+
+	return strings.Join(scopes, " ")
+}
+
+// Errors used and exported by this package.
+var (
+	ErrInsufficientScope = errors.New("insufficient scope")
+	ErrTokenRequired     = errors.New("authorization token required")
+)
+
+// authChallenge implements the auth.Challenge interface.
+type authChallenge struct {
+	err       error
+	realm     string
+	service   string
+	accessSet accessSet
+}
+
+var _ auth.Challenge = authChallenge{}
+
+// Error returns the internal error string for this authChallenge.
+func (ac authChallenge) Error() string {
+	return ac.err.Error()
+}
+
+// Status returns the HTTP Response Status Code for this authChallenge.
+func (ac authChallenge) Status() int {
+	return http.StatusUnauthorized
+}
+
+// challengeParams constructs the value to be used in
+// the WWW-Authenticate response challenge header.
+// See https://tools.ietf.org/html/rfc6750#section-3
+func (ac authChallenge) challengeParams() string {
+	str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)
+
+	if scope := ac.accessSet.scopeParam(); scope != "" {
+		str = fmt.Sprintf("%s,scope=%q", str, scope)
+	}
+
+	if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
+		str = fmt.Sprintf("%s,error=%q", str, "invalid_token")
+	} else if ac.err == ErrInsufficientScope {
+		str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope")
+	}
+
+	return str
+}
+
+// SetHeaders sets the WWW-Authenticate value for the response.
+func (ac authChallenge) SetHeaders(w http.ResponseWriter) {
+	w.Header().Add("WWW-Authenticate", ac.challengeParams())
+}
+
+// accessController implements the auth.AccessController interface.
+type accessController struct {
+	realm       string
+	issuer      string
+	service     string
+	rootCerts   *x509.CertPool
+	trustedKeys map[string]libtrust.PublicKey
+}
+
+// tokenAccessOptions is a convenience type for handling
+// options to the constructor of an accessController.
+type tokenAccessOptions struct {
+	realm          string
+	issuer         string
+	service        string
+	rootCertBundle string
+}
+
+// checkOptions gathers the necessary options
+// for an accessController from the given map.
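+//
+// All four options must be strings, for example (hypothetical values):
+//
+//	options := map[string]interface{}{
+//		"realm":          "https://auth.example.com/token",
+//		"issuer":         "auth.example.com",
+//		"service":        "registry.example.com",
+//		"rootcertbundle": "/etc/registry/root.crt",
+//	}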
+func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options. +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. 
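+// A sketch of a typical call site, which surfaces the returned challenge as
+// WWW-Authenticate headers (names are hypothetical):
+//
+//	ctx, err := ac.Authorized(ctx, auth.Access{
+//		Resource: auth.Resource{Type: "repository", Name: "foo/bar"},
+//		Action:   "pull",
+//	})
+//	if challenge, ok := err.(auth.Challenge); ok {
+//		challenge.SetHeaders(w)
+//		w.WriteHeader(http.StatusUnauthorized)
+//	}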
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go new file mode 100644 index 00000000..1d04f104 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go new file mode 100644 index 00000000..166816ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go @@ -0,0 +1,343 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." +) + +// Errors used by token parsing and verification. +var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. 
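+// In the serialized claim set it appears as, for example:
+//
+//	{"type": "repository", "name": "foo/bar", "actions": ["pull", "push"]}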
+type ResourceActions struct { + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. +type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. +func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Errorf("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Errorf("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. + currentUnixTime := time.Now().Unix() + if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { + log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) + return ErrInvalidToken + } + + // Verify the token signature. 
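+	// t.Raw holds base64url(header) + "." + base64url(claims) (set by
+	// NewToken), which is exactly the byte sequence the signature covers.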
+ if len(t.Signature) == 0 { + log.Error("token has no signature") + return ErrInvalidToken + } + + // Verify that the signing key is trusted. + signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Error(err) + return ErrInvalidToken + } + + // Finally, verify the signature of the token using the key which signed it. + if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { + log.Errorf("unable to verify token signature: %s", err) + return ErrInvalidToken + } + + return nil +} + +// VerifySigningKey attempts to get the key which was used to sign this token. +// The token header should contain either of these 3 fields: +// `x5c` - The x509 certificate chain for the signing key. Needs to be +// verified. +// `jwk` - The JSON Web Key representation of the signing key. +// May contain its own `x5c` field which needs to be verified. +// `kid` - The unique identifier for the key. This library interprets it +// as a libtrust fingerprint. The key itself can be looked up in +// the trustedKeys field of the given verify options. +// Each of these methods are tried in that order of preference until the +// signing key is found or an error is returned. +func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { + // First attempt to get an x509 certificate chain from the header. + var ( + x5c = t.Header.X5c + rawJWK = t.Header.RawJWK + keyID = t.Header.KeyID + ) + + switch { + case len(x5c) > 0: + signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) + case len(rawJWK) > 0: + signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) + case len(keyID) > 0: + signingKey = verifyOpts.TrustedKeys[keyID] + if signingKey == nil { + err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) + } + default: + err = errors.New("unable to get token signing key") + } + + return +} + +func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { + if len(x5c) == 0 { + return nil, errors.New("empty x509 certificate chain") + } + + // Ensure the first element is encoded correctly. + leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) + if err != nil { + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) + } + + // And that it is a valid x509 certificate. + leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) + } + + // The rest of the certificate chain are intermediate certificates. + intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. 
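+	// crypto.PublicKey is an empty interface, so this assertion cannot fail;
+	// libtrust then wraps the concrete (e.g. RSA or ECDSA) key type.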
+ leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. + x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. 
+func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go new file mode 100644 index 00000000..9d84d4ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go @@ -0,0 +1,386 @@ +package token + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { + keys := make([]libtrust.PrivateKey, 0, numKeys) + + for i := 0; i < numKeys; i++ { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + keys = append(keys, key) + } + + return keys, nil +} + +func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { + if depth == 0 { + // Don't need to build a chain. + return rootKey, nil + } + + var ( + x5c = make([]string, depth) + parentKey = rootKey + key libtrust.PrivateKey + cert *x509.Certificate + err error + ) + + for depth > 0 { + if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { + return nil, err + } + + if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { + return nil, err + } + + depth-- + x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) + parentKey = key + } + + key.AddExtendedField("x5c", x5c) + + return key, nil +} + +func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { + certs := make([]*x509.Certificate, 0, len(rootKeys)) + + for _, key := range rootKeys { + cert, err := libtrust.GenerateCACert(key, key) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + + return certs, nil +} + +func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { + trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) + + for _, key := range rootKeys { + trustedKeys[key.KeyID()] = key.PublicKey() + } + + return trustedKeys +} + +func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { + signingKey, err := makeSigningKeyWithChain(rootKey, depth) + if err != nil { + return nil, fmt.Errorf("unable to amke signing key with chain: %s", err) + } + + rawJWK, err := signingKey.PublicKey().MarshalJSON() + if err != nil { + return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) + } + + joseHeader := &Header{ + Type: "JWT", + SigningAlg: "ES256", + RawJWK: json.RawMessage(rawJWK), + } + + now := time.Now() + + randomBytes := make([]byte, 15) + if _, err = rand.Read(randomBytes); err != nil { + return nil, fmt.Errorf("unable to read random bytes for 
jwt id: %s", err) + } + + claimSet := &ClaimSet{ + Issuer: issuer, + Subject: "foo", + Audience: audience, + Expiration: now.Add(5 * time.Minute).Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: base64.URLEncoding.EncodeToString(randomBytes), + Access: access, + } + + var joseHeaderBytes, claimSetBytes []byte + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return nil, fmt.Errorf("unable to marshal jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return nil, fmt.Errorf("unable to marshal claim set: %s", err) + } + + encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) + encodedClaimSet := joseBase64UrlEncode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { + return nil, fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64UrlEncode(signatureBytes) + tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) + + return NewToken(tokenString) +} + +// This test makes 4 tokens with a varying number of intermediate +// certificates ranging from no intermediate chain to a length of 3 +// intermediates. +func TestTokenVerify(t *testing.T) { + var ( + numTokens = 4 + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(numTokens) + if err != nil { + t.Fatal(err) + } + + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + + rootPool := x509.NewCertPool() + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + tokens := make([]*Token, 0, numTokens) + + for i := 0; i < numTokens; i++ { + token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) + if err != nil { + t.Fatal(err) + } + tokens = append(tokens, token) + } + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: rootPool, + TrustedKeys: trustedKeys, + } + + for _, token := range tokens { + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + } +} + +func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + return "", err + } + + tempFile, err := ioutil.TempFile("", "rootCertBundle") + if err != nil { + return "", err + } + defer tempFile.Close() + + for _, cert := range rootCerts { + if err = pem.Encode(tempFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }); err != nil { + os.Remove(tempFile.Name()) + return "", err + } + } + + return tempFile.Name(), nil +} + +// TestAccessController tests complete integration of the token auth package. +// It starts by mocking the options for a token auth accessController which +// it creates. It then tries a few mock requests: +// - don't supply a token; should error with challenge +// - supply an invalid token; should error with challenge +// - supply a token with insufficient access; should error with challenge +// - supply a valid token; should not error +func TestAccessController(t *testing.T) { + // Make 2 keys; only the first is to be a trusted root key. 
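+	// The second key signs the token in case 2 below, which must therefore
+	// fail verification against the bundle built from rootKeys[:1].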
+	rootKeys, err := makeRootKeys(2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1])
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(rootCertBundleFilename)
+
+	realm := "https://auth.example.com/token/"
+	issuer := "test-issuer.example.com"
+	service := "test-service.example.com"
+
+	options := map[string]interface{}{
+		"realm":          realm,
+		"issuer":         issuer,
+		"service":        service,
+		"rootcertbundle": rootCertBundleFilename,
+	}
+
+	accessController, err := newAccessController(options)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 1. Make a mock http.Request with no token.
+	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testAccess := auth.Access{
+		Resource: auth.Resource{
+			Type: "foo",
+			Name: "bar",
+		},
+		Action: "baz",
+	}
+
+	ctx := context.WithValue(nil, "http.request", req)
+	authCtx, err := accessController.Authorized(ctx, testAccess)
+	challenge, ok := err.(auth.Challenge)
+	if !ok {
+		t.Fatal("accessController did not return a challenge")
+	}
+
+	if challenge.Error() != ErrTokenRequired.Error() {
+		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
+	}
+
+	if authCtx != nil {
+		t.Fatalf("expected nil auth context but got %s", authCtx)
+	}
+
+	// 2. Supply an invalid token.
+	token, err := makeTestToken(
+		issuer, service,
+		[]*ResourceActions{{
+			Type:    testAccess.Type,
+			Name:    testAccess.Name,
+			Actions: []string{testAccess.Action},
+		}},
+		rootKeys[1], 1, // Everything is valid except the key which signed it.
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
+
+	authCtx, err = accessController.Authorized(ctx, testAccess)
+	challenge, ok = err.(auth.Challenge)
+	if !ok {
+		t.Fatal("accessController did not return a challenge")
+	}
+
+	if challenge.Error() != ErrInvalidToken.Error() {
+		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInvalidToken)
+	}
+
+	if authCtx != nil {
+		t.Fatalf("expected nil auth context but got %s", authCtx)
+	}
+
+	// 3. Supply a token with insufficient access.
+	token, err = makeTestToken(
+		issuer, service,
+		[]*ResourceActions{}, // No access specified.
+		rootKeys[0], 1,
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
+
+	authCtx, err = accessController.Authorized(ctx, testAccess)
+	challenge, ok = err.(auth.Challenge)
+	if !ok {
+		t.Fatal("accessController did not return a challenge")
+	}
+
+	if challenge.Error() != ErrInsufficientScope.Error() {
+		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope)
+	}
+
+	if authCtx != nil {
+		t.Fatalf("expected nil auth context but got %s", authCtx)
+	}
+
+	// 4. Supply the token we need, or deserve, or whatever.
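+	// Signed under the trusted root and carrying exactly the requested
+	// access, so Authorized should succeed and set the "auth.user" context.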
+ token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[0], 1, + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("token accessController did not set auth.user context") + } + + if userInfo.Name != "foo" { + t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go new file mode 100644 index 00000000..d7f95be4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go new file mode 100644 index 00000000..7d8f1d95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go @@ -0,0 +1,58 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersion represents a version of an API including its +// type and version number. 
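+// For example, the header value "registry/2.0" corresponds to
+// APIVersion{Type: "registry", Version: "2.0"}.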
+type APIVersion struct {
+	// Type refers to the name of a specific API specification
+	// such as "registry"
+	Type string
+
+	// Version is the version of the API specification implemented,
+	// This may omit the revision number and only include
+	// the major and minor version, such as "2.0"
+	Version string
+}
+
+// String returns the string formatted API Version
+func (v APIVersion) String() string {
+	return v.Type + "/" + v.Version
+}
+
+// APIVersions gets the API versions out of an HTTP response using the provided
+// version header as the key for the HTTP header.
+func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
+	versions := []APIVersion{}
+	if versionHeader != "" {
+		for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
+			for _, version := range strings.Fields(supportedVersions) {
+				versions = append(versions, ParseAPIVersion(version))
+			}
+		}
+	}
+	return versions
+}
+
+// ParseAPIVersion parses an API version string into an APIVersion
+// Format (Expected, not enforced):
+//   API version string = <API type> '/' <API version>
+//   API type           = [a-z][a-z0-9]*
+//   API version        = [0-9]+(\.[0-9]+)?
+// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
+func ParseAPIVersion(versionStr string) APIVersion {
+	idx := strings.IndexRune(versionStr, '/')
+	if idx == -1 {
+		return APIVersion{
+			Type:    "unknown",
+			Version: versionStr,
+		}
+	}
+	return APIVersion{
+		Type:    strings.ToLower(versionStr[:idx]),
+		Version: versionStr[idx+1:],
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go
new file mode 100644
index 00000000..a6ad45d8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go
@@ -0,0 +1,219 @@
+package auth
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// ChallengeManager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type ChallengeManager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint string) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleChallengeManager returns an instance of
+// ChallengeManager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
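+// A sketch of typical use against a registry's base endpoint (the URL is
+// hypothetical):
+//
+//	cm := NewSimpleChallengeManager()
+//	resp, err := http.Get("https://registry.example.com/v2/")
+//	if err == nil {
+//		cm.AddResponse(resp)
+//		challenges, _ := cm.GetChallenges("https://registry.example.com/v2/")
+//		_ = challenges // e.g. [{bearer map[realm:... service:...]}]
+//	}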
+func NewSimpleChallengeManager() ChallengeManager {
+	return simpleChallengeManager{}
+}
+
+type simpleChallengeManager map[string][]Challenge
+
+func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) {
+	challenges := m[endpoint]
+	return challenges, nil
+}
+
+func (m simpleChallengeManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	m[urlCopy.String()] = challenges
+
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
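+		// (Storing is left to the caller; AddResponse above persists the
+		// parsed challenges keyed by endpoint URL.)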
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go new file mode 100644 index 00000000..9b6a5adc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go @@ -0,0 +1,38 @@ +package auth + +import ( + "net/http" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + challenge := challenges[0] + + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) + } + + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) + } + + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], 
expected)
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go
new file mode 100644
index 00000000..27a2aa71
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go
@@ -0,0 +1,256 @@
+package auth
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/transport"
+)
+
+// AuthenticationHandler is an interface for authorizing a request using the
+// params from a "WWW-Authenticate" header for a single scheme.
+type AuthenticationHandler interface {
+	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
+	Scheme() string
+
+	// AuthorizeRequest adds the authorization header to a request (if needed)
+	// using the parameters from the "WWW-Authenticate" header. The parameter
+	// values depend on the scheme.
+	AuthorizeRequest(req *http.Request, params map[string]string) error
+}
+
+// CredentialStore is an interface for getting credentials for
+// a given URL
+type CredentialStore interface {
+	// Basic returns basic auth for the given URL
+	Basic(*url.URL) (string, string)
+}
+
+// NewAuthorizer creates an authorizer which can handle multiple authentication
+// schemes. The handlers are tried in order; the higher priority authentication
+// methods should be first. The challengeMap holds a list of challenges for
+// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
+func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier {
+	return &endpointAuthorizer{
+		challenges: manager,
+		handlers:   handlers,
+	}
+}
+
+type endpointAuthorizer struct {
+	challenges ChallengeManager
+	handlers   []AuthenticationHandler
+	transport  http.RoundTripper
+}
+
+func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
+	v2Root := strings.Index(req.URL.Path, "/v2/")
+	if v2Root == -1 {
+		return nil
+	}
+
+	ping := url.URL{
+		Host:   req.URL.Host,
+		Scheme: req.URL.Scheme,
+		Path:   req.URL.Path[:v2Root+4],
+	}
+
+	pingEndpoint := ping.String()
+
+	challenges, err := ea.challenges.GetChallenges(pingEndpoint)
+	if err != nil {
+		return err
+	}
+
+	if len(challenges) > 0 {
+		for _, handler := range ea.handlers {
+			for _, challenge := range challenges {
+				if challenge.Scheme != handler.Scheme() {
+					continue
+				}
+				if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+type tokenHandler struct {
+	header    http.Header
+	creds     CredentialStore
+	scope     tokenScope
+	transport http.RoundTripper
+
+	tokenLock       sync.Mutex
+	tokenCache      string
+	tokenExpiration time.Time
+}
+
+// tokenScope represents the scope at which a token will be requested.
+// This represents a specific action on a registry resource.
+type tokenScope struct {
+	Resource string
+	Scope    string
+	Actions  []string
+}
+
+func (ts tokenScope) String() string {
+	return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ","))
+}
+
+// NewTokenHandler creates a new AuthenticationHandler which supports
+// fetching tokens from a remote token server.
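+// Combined with NewAuthorizer it yields a request-modifying transport that
+// fetches and caches Bearer tokens; a sketch (names are hypothetical):
+//
+//	h := NewTokenHandler(nil, creds, "foo/bar", "pull")
+//	t := transport.NewTransport(nil, NewAuthorizer(manager, h))
+//	client := &http.Client{Transport: t}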
+func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
+	return &tokenHandler{
+		transport: transport,
+		creds:     creds,
+		scope: tokenScope{
+			Resource: "repository",
+			Scope:    scope,
+			Actions:  actions,
+		},
+	}
+}
+
+func (th *tokenHandler) client() *http.Client {
+	return &http.Client{
+		Transport: th.transport,
+		Timeout:   15 * time.Second,
+	}
+}
+
+func (th *tokenHandler) Scheme() string {
+	return "bearer"
+}
+
+func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
+	if err := th.refreshToken(params); err != nil {
+		return err
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache))
+
+	return nil
+}
+
+func (th *tokenHandler) refreshToken(params map[string]string) error {
+	th.tokenLock.Lock()
+	defer th.tokenLock.Unlock()
+	now := time.Now()
+	if now.After(th.tokenExpiration) {
+		token, err := th.fetchToken(params)
+		if err != nil {
+			return err
+		}
+		th.tokenCache = token
+		th.tokenExpiration = now.Add(time.Minute)
+	}
+
+	return nil
+}
+
+type tokenResponse struct {
+	Token string `json:"token"`
+}
+
+func (th *tokenHandler) fetchToken(params map[string]string) (token string, err error) {
+	//log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username)
+	realm, ok := params["realm"]
+	if !ok {
+		return "", errors.New("no realm specified for token auth challenge")
+	}
+
+	// TODO(dmcgowan): Handle empty scheme
+
+	realmURL, err := url.Parse(realm)
+	if err != nil {
+		return "", fmt.Errorf("invalid token auth challenge realm: %s", err)
+	}
+
+	req, err := http.NewRequest("GET", realmURL.String(), nil)
+	if err != nil {
+		return "", err
+	}
+
+	reqParams := req.URL.Query()
+	service := params["service"]
+	scope := th.scope.String()
+
+	if service != "" {
+		reqParams.Add("service", service)
+	}
+
+	for _, scopeField := range strings.Fields(scope) {
+		reqParams.Add("scope", scopeField)
+	}
+
+	if th.creds != nil {
+		username, password := th.creds.Basic(realmURL)
+		if username != "" && password != "" {
+			reqParams.Add("account", username)
+			req.SetBasicAuth(username, password)
+		}
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := th.client().Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if !client.SuccessStatus(resp.StatusCode) {
+		return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	tr := new(tokenResponse)
+	if err = decoder.Decode(tr); err != nil {
+		return "", fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	if tr.Token == "" {
+		return "", errors.New("authorization server did not include a token in the response")
+	}
+
+	return tr.Token, nil
+}
+
+type basicHandler struct {
+	creds CredentialStore
+}
+
+// NewBasicHandler creates a new authentication handler which adds
+// basic authentication credentials to a request.
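+// It is typically registered after a token handler, so that Basic is only
+// used when the endpoint challenges with the "basic" scheme, e.g.:
+//
+//	NewAuthorizer(manager, NewTokenHandler(nil, creds, repo, "pull"), NewBasicHandler(creds))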
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return errors.New("no basic auth credentials") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go new file mode 100644 index 00000000..1b4754ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go @@ -0,0 +1,311 @@ +package auth + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/testutil" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +type testAuthenticationWrapper struct { + headers http.Header + authCheck func(string) bool + next http.Handler +} + +func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || !w.authCheck(auth) { + h := rw.Header() + for k, values := range w.headers { + h[k] = values + } + rw.WriteHeader(http.StatusUnauthorized) + return + } + w.next.ServeHTTP(rw, r) +} + +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { + h := testutil.NewHandler(rrm) + wrapper := &testAuthenticationWrapper{ + + headers: http.Header(map[string][]string{ + "X-API-Version": {"registry/2.0"}, + "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, + "WWW-Authenticate": {authenticate}, + }), + authCheck: authCheck, + next: h, + } + + s := httptest.NewServer(wrapper) + return s.URL, s.Close +} + +// ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. 
+func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { + resp, err := http.Get(endpoint) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return nil, err + } + + return APIVersions(resp, versionHeader), err +} + +type testCredentialStore struct { + username string + password string +} + +func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { + return tcs.username, tcs.password +} + +func TestEndpointAuthorizeToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"badtoken"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := NewSimpleChallengeManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + badCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e2, c2 := testServerWithAuth(m, authenicate, badCheck) + defer c2() + + challengeManager2 := NewSimpleChallengeManager() + versions, err = ping(challengeManager2, e+"/v2/", "x-multi-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 3 { + t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) + } + if check := (APIVersion{Type: "trust", Version: 
"1.0"}); versions[2] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) + } + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func TestEndpointAuthorizeTokenBasic(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + basicCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func TestEndpointAuthorizeBasic(t *testing.T) { + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + username := "user1" + password := "funSecretPa$$word" + authenicate := fmt.Sprintf("Basic realm=localhost") + validCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != 
nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 00000000..c7eee4e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,176 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. + offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { + newOffset := hbu.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + 
newOffset += int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + hbu.offset = newOffset + + return hbu.offset, nil +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go new file mode 100644 index 00000000..099dca4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go @@ -0,0 +1,211 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +// Test implements distribution.BlobWriter +var _ distribution.BlobWriter = &httpBlobUpload{} + +func TestUploadReadFrom(t *testing.T) { + _, b := newRandomBlob(64) + repo := "test/upload/readfrom" + locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + // Test Valid case + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {"0-63"}, + }), + }, + }, + // Test invalid range + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {""}, + }), + }, + }, + // Test 404 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + }, 
+ }, + // Test 400 valid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte(` + { "errors": + [ + { + "code": "BLOB_UPLOAD_INVALID", + "message": "blob upload invalid", + "detail": "more detail" + } + ] + } `), + }, + }, + // Test 400 invalid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte("something bad happened"), + }, + }, + // Test 500 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + }) + + e, c := testServer(m) + defer c() + + blobUpload := &httpBlobUpload{ + client: &http.Client{}, + } + + // Valid case + blobUpload.location = e + locationPath + n, err := blobUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + if n != 64 { + t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) + } + + // Bad range + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when bad range received") + } + + // 404 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) + } + + // 400 valid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(errcode.Errors); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if len(uploadErr) != 1 { + t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) + } else { + v2Err, ok := uploadErr[0].(errcode.Error) + if !ok { + t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) + } + if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { + t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) + } + if expected := "blob upload invalid"; v2Err.Message != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) + } + if expected := "more detail"; v2Err.Detail.(string) != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) + } + } + + // 400 invalid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else { + respStr := string(uploadErr.Response) + if expected := "something bad happened"; respStr != expected { + t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) + } + } + + // 500 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { + t.Fatalf("Unexpected 
response status: %s, expected %s", uploadErr.Status, expected) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go new file mode 100644 index 00000000..ebd1c36c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go @@ -0,0 +1,69 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" +) + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) +} + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. +type UnexpectedHTTPResponseError struct { + ParseErr error + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) +} + +func parseHTTPErrorResponse(r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + if err := json.Unmarshal(body, &errors); err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + Response: body, + } + } + return errors +} + +func handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == 401 { + err := parseHTTPErrorResponse(resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { + return v2.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + return parseHTTPErrorResponse(resp.Body) + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). +func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 00000000..c1e8e07f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go @@ -0,0 +1,553 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. 
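In sketch form, paging the full catalog with this interface looks like the following (the page size and helper name are illustrative, and the snippet is written as if inside this package, with "context" and "io" imported):

	func listRepositories(ctx context.Context, reg Registry) ([]string, error) {
		var all []string
		entries := make([]string, 50) // page size; any positive length works
		last := ""
		for {
			n, err := reg.Repositories(ctx, entries, last)
			all = append(all, entries[:n]...)
			if err == io.EOF {
				return all, nil // no Link header: catalog exhausted
			}
			if err != nil {
				return nil, err
			}
			last = entries[n-1] // resume after the last entry seen
		}
	}
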
+type Registry interface {
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: transport,
+		Timeout:   1 * time.Minute,
+	}
+
+	return &registry{
+		client:  client,
+		ub:      ub,
+		context: ctx,
+	}, nil
+}
+
+type registry struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled
+// up to the size of the slice, starting at the value provided in 'last'. The number of entries is returned
+// along with io.EOF if there are no more entries.
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+	var numFilled int
+	var returnErr error
+
+	values := buildCatalogValues(len(entries), last)
+	u, err := r.ub.BuildCatalogURL(values)
+	if err != nil {
+		return 0, err
+	}
+
+	resp, err := r.client.Get(u)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		var ctlg struct {
+			Repositories []string `json:"repositories"`
+		}
+		decoder := json.NewDecoder(resp.Body)
+
+		if err := decoder.Decode(&ctlg); err != nil {
+			return 0, err
+		}
+
+		for cnt := range ctlg.Repositories {
+			entries[cnt] = ctlg.Repositories[cnt]
+		}
+		numFilled = len(ctlg.Repositories)
+
+		link := resp.Header.Get("Link")
+		if link == "" {
+			returnErr = io.EOF
+		}
+	} else {
+		return 0, handleErrorResponse(resp)
+	}
+
+	return numFilled, returnErr
+}
+
+// NewRepository creates a new Repository for the given repository name and base URL
+func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+	if err := v2.ValidateRepositoryName(name); err != nil {
+		return nil, err
+	}
+
+	ub, err := v2.NewURLBuilderFromString(baseURL)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: transport,
+		// TODO(dmcgowan): create cookie jar
+	}
+
+	return &repository{
+		client:  client,
+		ub:      ub,
+		name:    name,
+		context: ctx,
+	}, nil
+}
+
+type repository struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+	name    string
+}
+
+func (r *repository) Name() string {
+	return r.name
+}
+
+func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
+	statter := &blobStatter{
+		name:   r.Name(),
+		ub:     r.ub,
+		client: r.client,
+	}
+	return &blobs{
+		name:    r.Name(),
+		ub:      r.ub,
+		client:  r.client,
+		statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
+	}
+}
+
+func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	// todo(richardscothern): options should be sent over the wire
+	return &manifests{
+		name:   r.Name(),
+		ub:     r.ub,
+		client: r.client,
+		etags:  make(map[string]string),
+	}, nil
+}
+
+func (r *repository) Signatures() distribution.SignatureService {
+	ms, _ := r.Manifests(r.context)
+	return &signatures{
+		manifests: ms,
+	}
+}
+
+type signatures struct {
+	manifests distribution.ManifestService
+}
+
+func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) {
+	m, err := s.manifests.Get(dgst)
+	if err !=
nil { + return nil, err + } + return m.Signatures() +} + +func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { + panic("not implemented") +} + +type manifests struct { + name string + ub *v2.URLBuilder + client *http.Client + etags map[string]string +} + +func (ms *manifests) Tags() ([]string, error) { + u, err := ms.ub.BuildTagsURL(ms.name) + if err != nil { + return nil, err + } + + resp, err := ms.client.Get(u) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return nil, err + } + + return tagsResponse.Tags, nil + } else if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + return nil, handleErrorResponse(resp) +} + +func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { + // Call by Tag endpoint since the API uses the same + // URL endpoint for tags and digests. + return ms.ExistsByTag(dgst.String()) +} + +func (ms *manifests) ExistsByTag(tag string) (bool, error) { + u, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return false, err + } + + resp, err := ms.client.Head(u) + if err != nil { + return false, err + } + + if SuccessStatus(resp.StatusCode) { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } + return false, handleErrorResponse(resp) +} + +func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + // Call by Tag endpoint since the API uses the same + // URL endpoint for tags and digests. + return ms.GetByTag(dgst.String()) +} + +// AddEtagToTag allows a client to supply an eTag to GetByTag which will be +// used for a conditional HTTP request. If the eTag matches, a nil manifest +// and nil error will be returned. etag is automatically quoted when added to +// this map. 
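In sketch form, a conditional fetch looks like this (ms and cachedDigest are assumed to come from an earlier call to Manifests and a prior fetch):

	m, err := ms.GetByTag("latest", AddEtagToTag("latest", cachedDigest.String()))
	if err != nil {
		return err
	}
	if m == nil {
		// 304 Not Modified: the cached manifest is still current.
	}
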
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { + return func(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) + return nil + } + return fmt.Errorf("etag options is a client-only option") + } +} + +func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + + u, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return nil, err + } + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + if _, ok := ms.etags[tag]; ok { + req.Header.Set("If-None-Match", ms.etags[tag]) + } + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, nil + } else if SuccessStatus(resp.StatusCode) { + var sm manifest.SignedManifest + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&sm); err != nil { + return nil, err + } + return &sm, nil + } + return nil, handleErrorResponse(resp) +} + +func (ms *manifests) Put(m *manifest.SignedManifest) error { + manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) + if err != nil { + return err + } + + // todo(richardscothern): do something with options here when they become applicable + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) + if err != nil { + return err + } + + resp, err := ms.client.Do(putRequest) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + // TODO(dmcgowan): make use of digest header + return nil + } + return handleErrorResponse(resp) +} + +func (ms *manifests) Delete(dgst digest.Digest) error { + u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return handleErrorResponse(resp) +} + +type blobs struct { + name string + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, source string) (string, error) { + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + if locationURL.Scheme == "" { + sourceURL, err := url.Parse(source) + if err != nil { + return "", err + } + locationURL = &url.URL{ + Scheme: sourceURL.Scheme, + Host: sourceURL.Host, + Path: location, + } + location = locationURL.String() + } + return location, nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + desc, err := bs.Stat(ctx, dgst) + if err != nil { + return nil, err + } + reader, err := bs.Open(ctx, desc.Digest) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + stat, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest) + if err != nil { + return nil, err + } 
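+	// Note: the returned seeker is lazy. No HTTP request is issued until the
+	// first Read, and Seek only records the new offset (see
+	// transport.NewHTTPReadSeeker later in this patch).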
+
+	return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil
+}
+
+func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	panic("not implemented")
+}
+
+func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	writer, err := bs.Create(ctx)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	dgstr := digest.Canonical.New()
+	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	if n < int64(len(p)) {
+		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
+	}
+
+	desc := distribution.Descriptor{
+		MediaType: mediaType,
+		Size:      int64(len(p)),
+		Digest:    dgstr.Digest(),
+	}
+
+	return writer.Commit(ctx, desc)
+}
+
+func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
+	u, err := bs.ub.BuildBlobUploadURL(bs.name)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := bs.client.Post(u, "", nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		// TODO(dmcgowan): Check for invalid UUID
+		uuid := resp.Header.Get("Docker-Upload-UUID")
+		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
+		if err != nil {
+			return nil, err
+		}
+
+		return &httpBlobUpload{
+			statter:   bs.statter,
+			client:    bs.client,
+			uuid:      uuid,
+			startedAt: time.Now(),
+			location:  location,
+		}, nil
+	}
+	return nil, handleErrorResponse(resp)
+}
+
+func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+	panic("not implemented")
+}
+
+func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
+	return bs.statter.Clear(ctx, dgst)
+}
+
+type blobStatter struct {
+	name   string
+	ub     *v2.URLBuilder
+	client *http.Client
+}
+
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	u, err := bs.ub.BuildBlobURL(bs.name, dgst)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	resp, err := bs.client.Head(u)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		lengthHeader := resp.Header.Get("Content-Length")
+		length, err := strconv.ParseInt(lengthHeader, 10, 64)
+		if err != nil {
+			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
+		}
+
+		return distribution.Descriptor{
+			MediaType: resp.Header.Get("Content-Type"),
+			Size:      length,
+			Digest:    dgst,
+		}, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+	return distribution.Descriptor{}, handleErrorResponse(resp)
+}
+
+func buildCatalogValues(maxEntries int, last string) url.Values {
+	values := url.Values{}
+
+	if maxEntries > 0 {
+		values.Add("n", strconv.Itoa(maxEntries))
+	}
+
+	if last != "" {
+		values.Add("last", last)
+	}
+
+	return values
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("DELETE", blobURL, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := bs.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		return nil
+	}
+	return handleErrorResponse(resp)
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc
distribution.Descriptor) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go new file mode 100644 index 00000000..26201763 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go @@ -0,0 +1,859 @@ +package client + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution/uuid" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +func newRandomBlob(size int) (digest.Digest, []byte) { + b := make([]byte, size) + if n, err := rand.Read(b); err != nil { + panic(err) + } else if n != size { + panic("unable to read enough bytes") + } + + dgst, err := digest.FromBytes(b) + if err != nil { + panic(err) + } + + return dgst, b +} + +func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) +} + +func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { + headers := map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + } + if link != "" { + headers["Link"] = append(headers["Link"], link) + } + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: route, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(headers), + }, + }) +} + +func TestBlobDelete(t *testing.T) { + dgst, _ := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo := "test.example.com/repo1" + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + err = l.Delete(ctx, dgst) + if err != nil 
{ + t.Errorf("Error deleting blob: %s", err.Error()) + } + +} + +func TestBlobFetch(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + b, err := l.Get(ctx, d1) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b, b1) != 0 { + t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) + } + + // TODO(dmcgowan): Test for unknown blob case +} + +func TestBlobExists(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + stat, err := l.Stat(ctx, d1) + if err != nil { + t.Fatal(err) + } + + if stat.Digest != d1 { + t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) + } + + if stat.Size != int64(len(b1)) { + t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) + } + + // TODO(dmcgowan): Test error cases and ErrBlobUnknown case +} + +func TestBlobUploadChunked(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + chunks := [][]byte{ + b1[0:256], + b1[256:512], + b1[512:513], + b1[513:1024], + } + repo := "test.example.com/uploadrepo" + uuids := []string{uuid.Generate().String()} + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[0]}, + "Docker-Upload-UUID": {uuids[0]}, + "Range": {"0-0"}, + }), + }, + }) + offset := 0 + for i, chunk := range chunks { + uuids = append(uuids, uuid.Generate().String()) + newOffset := offset + len(chunk) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo + "/blobs/uploads/" + uuids[i], + Body: chunk, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]}, + "Docker-Upload-UUID": {uuids[i+1]}, + "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, + }), + }, + }) + offset = newOffset + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1], + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(offset)}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + 
}, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) + } + + for _, chunk := range chunks { + n, err := upload.Write(chunk) + if err != nil { + t.Fatal(err) + } + if n != len(chunk) { + t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) + } + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobUploadMonolithic(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo := "test.example.com/uploadrepo" + uploadID := uuid.Generate().String() + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Range": {"0-0"}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Body: b1, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(b1))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) + } + + n, err := upload.ReadFrom(bytes.NewReader(b1)) + if err != nil { + t.Fatal(err) + } + if n != int64(len(b1)) { + t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + 
t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { + blobs := make([]manifest.FSLayer, blobCount) + history := make([]manifest.History, blobCount) + + for i := 0; i < blobCount; i++ { + dgst, blob := newRandomBlob((i % 5) * 16) + + blobs[i] = manifest.FSLayer{BlobSum: dgst} + history[i] = manifest.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + } + + m := &manifest.SignedManifest{ + Manifest: manifest.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + }, + } + manifestBytes, err := json.Marshal(m) + if err != nil { + panic(err) + } + dgst, err := digest.FromBytes(manifestBytes) + if err != nil { + panic(err) + } + + m.Raw = manifestBytes + + return m, dgst +} + +func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest, _ := digest.FromBytes(content) + getReqWithEtag := testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + reference, + Headers: http.Header(map[string][]string{ + "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, + }), + } + + var getRespWithEtag testutil.Response + if actualDigest.String() == dgst { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusNotModified, + Body: []byte{}, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + } + } else { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + } + + } + *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) +} + +func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + +} + +func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { + if m1.Name != m2.Name { + return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) + } + if m1.Tag != m2.Tag { + return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) + } + if len(m1.FSLayers) != len(m2.FSLayers) { + return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + } + for i := range m1.FSLayers { + if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { + return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) + } + } + if 
len(m1.History) != len(m2.History) { + return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) + } + for i := range m1.History { + if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { + return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) + } + } + return nil +} + +func TestManifestFetch(t *testing.T) { + ctx := context.Background() + repo := "test.example.com/repo" + m1, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifest(repo, dgst.String(), m1.Raw, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + ok, err := ms.Exists(dgst) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not exist") + } + + manifest, err := ms.Get(dgst) + if err != nil { + t.Fatal(err) + } + if err := checkEqualManifest(manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestFetchWithEtag(t *testing.T) { + repo := "test.example.com/repo/by/tag" + m1, d1 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifestWithEtag(repo, "latest", m1.Raw, &m, d1.String()) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + m2, err := ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + if err != nil { + t.Fatal(err) + } + if m2 != nil { + t.Fatal("Expected empty manifest for matching etag") + } +} + +func TestManifestDelete(t *testing.T) { + repo := "test.example.com/repo/delete" + _, dgst1 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo + "/manifests/" + dgst1.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if err := ms.Delete(dgst1); err != nil { + t.Fatal(err) + } + if err := ms.Delete(dgst2); err == nil { + t.Fatal("Expected error deleting unknown manifest") + } + // TODO(dmcgowan): Check for specific unknown error +} + +func TestManifestPut(t *testing.T) { + repo := "test.example.com/repo/delete" + m1, dgst := newRandomSchemaV1Manifest(repo, "other", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/manifests/other", + Body: m1.Raw, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + 
t.Fatal(err) + } + + if err := ms.Put(m1); err != nil { + t.Fatal(err) + } + + // TODO(dmcgowan): Check for invalid input error +} + +func TestManifestTags(t *testing.T) { + repo := "test.example.com/repo/tags/list" + tagsList := []byte(strings.TrimSpace(` +{ + "name": "test.example.com/repo/tags/list", + "tags": [ + "tag1", + "tag2", + "funtag" + ] +} + `)) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + tags, err := ms.Tags() + if err != nil { + t.Fatal(err) + } + + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + // TODO(dmcgowan): Check array + + // TODO(dmcgowan): Check for error cases +} + +func TestManifestUnauthorized(t *testing.T) { + repo := "test.example.com/repo" + _, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusUnauthorized, + Body: []byte("garbage"), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = ms.Get(dgst) + if err == nil { + t.Fatal("Expected error fetching manifest") + } + v2Err, ok := err.(errcode.Error) + if !ok { + t.Fatalf("Unexpected error type: %#v", err) + } + if v2Err.Code != v2.ErrorCodeUnauthorized { + t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) + } + if expected := v2.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) + } +} + +func TestCatalog(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=5", + []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 5) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 3 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestCatalogInParts(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=2", + []byte("{\"repositories\":[\"bar\", \"baz\"]}"), + "", &m) + addTestCatalog( + "/v2/_catalog?last=baz&n=2", + []byte("{\"repositories\":[\"foo\"]}"), + "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 2) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != nil { + t.Fatal(err) + } + + if numFilled != 2 
{ + t.Fatalf("Got wrong number of repos") + } + + numFilled, err = r.Repositories(ctx, entries, "baz") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 1 { + t.Fatalf("Got wrong number of repos") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 00000000..b2e74ddb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,173 @@ +package transport + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" +) + +// ReadSeekCloser combines io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET +// request. When seeking and starting a read from a non-zero offset +// the a "Range" header will be added which sets the offset. +// TODO(dmcgowan): Move this into a separate utility package +func NewHTTPReadSeeker(client *http.Client, url string, size int64) ReadSeekCloser { + return &httpReadSeeker{ + client: client, + url: url, + size: size, + } +} + +type httpReadSeeker struct { + client *http.Client + url string + + size int64 + + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 + err error +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.err != nil { + return 0, hrs.err + } + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + + // Simulate io.EOF error if we reach filesize. + if err == nil && hrs.offset >= hrs.size { + err = io.EOF + } + + return n, err +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.err != nil { + return 0, hrs.err + } + + var err error + newOffset := hrs.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = hrs.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + if hrs.offset != newOffset { + hrs.reset() + } + + // No problems, set the offset. + hrs.offset = newOffset + } + + return hrs.offset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + hrs.brd = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.brd, nil + } + + // If the offset is great than or equal to size, return a empty, noop reader. + if hrs.offset >= hrs.size { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.offset > 0 { + // TODO(stevvooe): Get this working correctly. + + // If we are at different offset, issue a range request from there. 
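+		// An HTTP byte-range request takes the form "Range: bytes=<start>-",
+		// so resuming from hrs.offset needs "bytes=%d-" rather than a bare
+		// index.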
+		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.offset))
+		// TODO: get context in here
+		// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
+	}
+
+	resp, err := hrs.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Normally would use client.SuccessStatus, but that would be a cyclic
+	// import
+	if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
+		hrs.rc = resp.Body
+	} else {
+		defer resp.Body.Close()
+		return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
+	}
+
+	if hrs.brd == nil {
+		hrs.brd = bufio.NewReader(hrs.rc)
+	} else {
+		hrs.brd.Reset(hrs.rc)
+	}
+
+	return hrs.brd, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 00000000..30e45fab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+	ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+	return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+	for k, s := range http.Header(h) {
+		req.Header[k] = append(req.Header[k], s...)
+	}
+
+	return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+	return &transport{
+		Modifiers: modifiers,
+		Base:      base,
+	}
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+	Modifiers []RequestModifier
+	Base      http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip applies the registered modifiers to a copy of the request
+// and then passes the copy to the base RoundTripper, leaving the
+// caller's request untouched.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req2 := cloneRequest(req)
+	for _, modifier := range t.Modifiers {
+		if err := modifier.ModifyRequest(req2); err != nil {
+			return nil, err
+		}
+	}
+
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go new file mode 100644 index 00000000..1c01e42e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go @@ -0,0 +1,3 @@ +// Package registry is a placeholder package for registry interface +// definitions and utilities. +package registry diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go new file mode 100644 index 00000000..c484835f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go @@ -0,0 +1,1380 @@ +package handlers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/gorilla/handlers" + "golang.org/x/net/context" +) + +// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified +// 200 OK response. 
+func TestCheckAPI(t *testing.T) { + env := newTestEnv(t, false) + + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading response body: %v", err) + } + + if string(p) != "{}" { + t.Fatalf("unexpected response body: %v", string(p)) + } +} + +// TestCatalogAPI tests the /v2/_catalog endpoint +func TestCatalogAPI(t *testing.T) { + chunkLen := 2 + env := newTestEnv(t, false) + + values := url.Values{ + "last": []string{""}, + "n": []string{strconv.Itoa(chunkLen)}} + + catalogURL, err := env.builder.BuildCatalogURL(values) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + // ----------------------------------- + // try to get an empty catalog + resp, err := http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + var ctlg struct { + Repositories []string `json:"repositories"` + } + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // we haven't pushed anything to the registry yet + if len(ctlg.Repositories) != 0 { + t.Fatalf("repositories has unexpected values") + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + + // ----------------------------------- + // push something to the registry and try again + images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} + + for _, image := range images { + createRepository(env, t, image, "sometag") + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != chunkLen { + t.Fatalf("repositories has unexpected values") + } + + for _, image := range images[:chunkLen] { + if !contains(ctlg.Repositories, image) { + t.Fatalf("didn't find our repository '%s' in the catalog", image) + } + } + + link := resp.Header.Get("Link") + if link == "" { + t.Fatalf("repositories has less data than expected") + } + + newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) + + // ----------------------------------- + // get the last chunk of data + + catalogURL, err = env.builder.BuildCatalogURL(newValues) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != 1 { + t.Fatalf("repositories has 
unexpected values") + } + + lastImage := images[len(images)-1] + if !contains(ctlg.Repositories, lastImage) { + t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) + } + + link = resp.Header.Get("Link") + if link != "" { + t.Fatalf("catalog has unexpected data") + } +} + +func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { + re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") + matches := re.FindStringSubmatch(urlStr) + + if len(matches) != 2 { + t.Fatalf("Catalog link address response was incorrect") + } + linkURL, _ := url.Parse(matches[1]) + urlValues := linkURL.Query() + + if urlValues.Get("n") != strconv.Itoa(numEntries) { + t.Fatalf("Catalog link entry size is incorrect") + } + + if urlValues.Get("last") != last { + t.Fatal("Catalog link last entry is incorrect") + } + + return urlValues +} + +func contains(elems []string, e string) bool { + for _, elem := range elems { + if elem == e { + return true + } + } + return false +} + +func TestURLPrefix(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + config.HTTP.Prefix = "/test/" + + env := newTestEnvWithConfig(t, &config) + + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + parsed, _ := url.Parse(baseURL) + if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { + t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) +} + +type blobArgs struct { + imageName string + layerFile io.ReadSeeker + layerDigest digest.Digest + tarSumStr string +} + +func makeBlobArgs(t *testing.T) blobArgs { + layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + layerDigest := digest.Digest(tarSumStr) + + args := blobArgs{ + imageName: "foo/bar", + layerFile: layerFile, + layerDigest: layerDigest, + tarSumStr: tarSumStr, + } + return args +} + +// TestBlobAPI conducts a full test of the of the blob api. 
+func TestBlobAPI(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeBlobArgs(t) + testBlobAPI(t, env, args) + + deleteEnabled = true + env = newTestEnv(t, deleteEnabled) + args = makeBlobArgs(t) + testBlobAPI(t, env, args) + +} + +func TestBlobDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + + args := makeBlobArgs(t) + env = testBlobAPI(t, env, args) + testBlobDelete(t, env, args) +} + +func TestBlobDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeBlobArgs(t) + + imageName := args.imageName + layerDigest := args.layerDigest + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting when disabled: %v", err) + } + + checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) +} + +func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { + // TODO(stevvooe): This test code is complete junk but it should cover the + // complete flow. This must be broken down and checked against the + // specification *before* we submit the final to docker core. + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + // ----------------------------------- + // Test fetch for non-existent content + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) + + // ------------------------------------------ + // Test head request for non-existent content + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) + + // ------------------------------------------ + // Start an upload, check the status then cancel + uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) + + // A status check should work + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Range": []string{"0-0"}, + "Docker-Upload-UUID": []string{uploadUUID}, + }) + + req, err := http.NewRequest("DELETE", uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error creating delete request: %v", err) + } + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error sending delete request: %v", err) + } + + checkResponse(t, "deleting upload", resp, http.StatusNoContent) + + // A status check should result in 404 + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) + + // ----------------------------------------- + // Do layer push with an empty body and different digest + uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) + 
+	if err != nil {
+		t.Fatalf("unexpected error doing bad layer push: %v", err)
+	}
+
+	checkResponse(t, "bad layer push", resp, http.StatusBadRequest)
+	checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid)
+
+	// -----------------------------------------
+	// Do layer push with an empty body and correct digest
+	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	if err != nil {
+		t.Fatalf("unexpected error digesting empty buffer: %v", err)
+	}
+
+	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{}))
+
+	// -----------------------------------------
+	// Do layer push with a valid but empty tar and correct digest
+
+	// This is a valid but empty tarfile!
+	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
+	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
+	if err != nil {
+		t.Fatalf("unexpected error digesting empty tar: %v", err)
+	}
+
+	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar))
+
+	// ------------------------------------------
+	// Now, actually do successful upload.
+	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
+	layerFile.Seek(0, os.SEEK_SET)
+
+	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
+
+	// ------------------------------------------
+	// Now, push just a chunk
+	layerFile.Seek(0, 0)
+
+	canonicalDigester := digest.Canonical.New()
+	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
+		t.Fatalf("error copying to digest: %v", err)
+	}
+	canonicalDigest := canonicalDigester.Digest()
+
+	layerFile.Seek(0, 0)
+	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
+	uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength)
+	finishUpload(t, env.builder, imageName, uploadURLBase, dgst)
+
+	// ------------------------
+	// Use a head request to see if the layer exists.
+	resp, err = http.Head(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error checking head on existing layer: %v", err)
+	}
+
+	checkResponse(t, "checking head on existing layer", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length":        []string{fmt.Sprint(layerLength)},
+		"Docker-Content-Digest": []string{canonicalDigest.String()},
+	})
+
+	// ----------------
+	// Fetch the layer!
+	resp, err = http.Get(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error fetching layer: %v", err)
+	}
+
+	checkResponse(t, "fetching layer", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length":        []string{fmt.Sprint(layerLength)},
+		"Docker-Content-Digest": []string{canonicalDigest.String()},
+	})
+
+	// Verify the body
+	verifier, err := digest.NewDigestVerifier(layerDigest)
+	if err != nil {
+		t.Fatalf("unexpected error getting digest verifier: %s", err)
+	}
+	if _, err := io.Copy(verifier, resp.Body); err != nil {
+		t.Fatalf("unexpected error copying response body: %v", err)
+	}
+
+	if !verifier.Verified() {
+		t.Fatalf("response body did not pass verification")
+	}
+
+	// ----------------
+	// Fetch the layer with an invalid digest
+	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	resp, err = http.Get(badURL)
+	if err != nil {
+		t.Fatalf("unexpected error fetching layer: %v", err)
+	}
+
+	checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest)
+
+	// Cache headers
+	resp, err = http.Get(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error fetching layer: %v", err)
+	}
+
+	checkResponse(t, "fetching layer", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length":        []string{fmt.Sprint(layerLength)},
+		"Docker-Content-Digest": []string{canonicalDigest.String()},
+		"ETag":                  []string{fmt.Sprintf(`"%s"`, canonicalDigest)},
+		"Cache-Control":         []string{"max-age=31536000"},
+	})
+
+	// Matching etag, gives 304
+	etag := resp.Header.Get("Etag")
+	req, err = http.NewRequest("GET", layerURL, nil)
+	if err != nil {
+		t.Fatalf("Error constructing request: %s", err)
+	}
+	req.Header.Set("If-None-Match", etag)
+
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("Error executing request: %s", err)
+	}
+
+	checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified)
+
+	// Non-matching etag, gives 200
+	req, err = http.NewRequest("GET", layerURL, nil)
+	if err != nil {
+		t.Fatalf("Error constructing request: %s", err)
+	}
+	req.Header.Set("If-None-Match", "")
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("Error executing request: %s", err)
+	}
+	checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK)
+
+	// Missing tests:
+	//	- Upload the same tarsum file under a different repository and
+	//	  ensure the content remains uncorrupted.
+	return env
+}
+
+func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
+	// Upload a layer
+	imageName := args.imageName
+	layerFile := args.layerFile
+	layerDigest := args.layerDigest
+
+	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
+	if err != nil {
+		t.Fatalf("error building url: %v", err)
+	}
+	// ---------------
+	// Delete a layer
+	resp, err := httpDelete(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error deleting layer: %v", err)
+	}
+
+	checkResponse(t, "deleting layer", resp, http.StatusAccepted)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length": []string{"0"},
+	})
+
+	// ---------------
+	// Try and get it back
+	// Use a head request to see if the layer exists.
+	resp, err = http.Head(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error checking head on existing layer: %v", err)
+	}
+
+	checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound)
+
+	// Delete already deleted layer
+	resp, err = httpDelete(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error deleting layer: %v", err)
+	}
+
+	checkResponse(t, "deleting already deleted layer", resp, http.StatusNotFound)
+
+	// ----------------
+	// Attempt to delete a layer with an invalid digest
+	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	resp, err = httpDelete(badURL)
+	if err != nil {
+		t.Fatalf("unexpected error deleting layer: %v", err)
+	}
+
+	checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest)
+
+	// ----------------
+	// Reupload previously deleted blob
+	layerFile.Seek(0, os.SEEK_SET)
+
+	uploadURLBase, _ := startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
+
+	layerFile.Seek(0, os.SEEK_SET)
+	canonicalDigester := digest.Canonical.New()
+	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
+		t.Fatalf("error copying to digest: %v", err)
+	}
+	canonicalDigest := canonicalDigester.Digest()
+
+	// ------------------------
+	// Use a head request to see if it exists
+	resp, err = http.Head(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error checking head on existing layer: %v", err)
+	}
+
+	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
+	checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length":        []string{fmt.Sprint(layerLength)},
+		"Docker-Content-Digest": []string{canonicalDigest.String()},
+	})
+}
+
+func TestDeleteDisabled(t *testing.T) {
+	env := newTestEnv(t, false)
+
+	imageName := "foo/bar"
+	// "build" our layer file
+	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	if err != nil {
+		t.Fatalf("error creating random layer file: %v", err)
+	}
+
+	layerDigest := digest.Digest(tarSumStr)
+	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
+	if err != nil {
+		t.Fatalf("error building blob URL: %v", err)
+	}
+	uploadURLBase, _ := startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
+
+	resp, err := httpDelete(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error deleting layer: %v", err)
+	}
+
+	checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed)
+}
+
+func httpDelete(url string) (*http.Response, error) {
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	// defer resp.Body.Close()
+	return resp, err
+}
+
+type manifestArgs struct {
+	imageName      string
+	signedManifest *manifest.SignedManifest
+	dgst           digest.Digest
+}
+
+func makeManifestArgs(t *testing.T) manifestArgs {
+	args := manifestArgs{
+		imageName: "foo/bar",
+	}
+
+	return args
+}
+
+func TestManifestAPI(t *testing.T) {
+	deleteEnabled := false
+	env := newTestEnv(t, deleteEnabled)
+	args := makeManifestArgs(t)
+	testManifestAPI(t, env, args)
+
+	deleteEnabled = true
+	env = newTestEnv(t, deleteEnabled)
+	args = makeManifestArgs(t)
+	testManifestAPI(t, env, args)
+}
+
+func TestManifestDelete(t *testing.T) {
+	deleteEnabled := true
+	env := newTestEnv(t, deleteEnabled)
+	args := makeManifestArgs(t)
+	env, args = testManifestAPI(t, env, args)
+	testManifestDelete(t, env, args)
+}
+
+func TestManifestDeleteDisabled(t *testing.T) {
+	deleteEnabled := false
+	env := newTestEnv(t, deleteEnabled)
+	args := makeManifestArgs(t)
+	testManifestDeleteDisabled(t, env, args)
+}
+
+func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) *testEnv {
+	imageName := args.imageName
+	manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar)
+	if err != nil {
+		t.Fatalf("unexpected error getting manifest url: %v", err)
+	}
+
+	resp, err := httpDelete(manifestURL)
+	if err != nil {
+		t.Fatalf("unexpected error deleting manifest: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed)
+	return nil
+}
+
+func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, manifestArgs) {
+	imageName := args.imageName
+	tag := "thetag"
+
+	manifestURL, err := env.builder.BuildManifestURL(imageName, tag)
+	if err != nil {
+		t.Fatalf("unexpected error getting manifest url: %v", err)
+	}
+
+	// -----------------------------
+	// Attempt to fetch the manifest
+	resp, err := http.Get(manifestURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting manifest: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)
+	checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown)
+
+	tagsURL, err := env.builder.BuildTagsURL(imageName)
+	if err != nil {
+		t.Fatalf("unexpected error building tags url: %v", err)
+	}
+
+	resp, err = http.Get(tagsURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting unknown tags: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Check that we get an unknown repository error when asking for tags
+	checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound)
+	checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown)
+
+	// --------------------------------
+	// Attempt to push unsigned manifest with missing layers
+	unsignedManifest := &manifest.Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name: imageName,
+		Tag:  tag,
+		FSLayers: []manifest.FSLayer{
+			{
+				BlobSum: "asdf",
+			},
+			{
+				BlobSum: "qwer",
+			},
+		},
+	}
+
+	resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest)
+	defer resp.Body.Close()
+	checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest)
+	_, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp,
+		v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid)
+
+	expectedCounts := map[errcode.ErrorCode]int{
+		v2.ErrorCodeManifestUnverified: 1,
+		v2.ErrorCodeBlobUnknown:        2,
+		v2.ErrorCodeDigestInvalid:      2,
+	}
+
+	if !reflect.DeepEqual(counts, expectedCounts) {
+		t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p))
+	}
+
+	// TODO(stevvooe): Add a test case where we take a mostly valid registry,
+	// tamper with the content and ensure that we get an unverified manifest
+	// error.
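+	//
+	// A rough sketch of that case (illustrative only, not part of the
+	// suite): once signedManifest exists below, flip one byte of its raw
+	// payload and expect ErrorCodeManifestUnverified on PUT:
+	//
+	//	tampered := append([]byte(nil), signedManifest.Raw...)
+	//	tampered[len(tampered)/2] ^= 0xff
+	//	req, _ := http.NewRequest("PUT", manifestURL, bytes.NewReader(tampered))
+	//	resp, _ := http.DefaultClient.Do(req)
+	//	checkBodyHasErrorCodes(t, "tampered manifest", resp, v2.ErrorCodeManifestUnverified)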
+ + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the signed manifest with all layers pushed. + signedManifest, err := manifest.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + payload, err := signedManifest.Payload() + checkErr(t, err, "getting manifest payload") + + dgst, err := digest.FromBytes(payload) + checkErr(t, err, "digesting manifest") + + args.signedManifest = signedManifest + args.dgst = dgst + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest manifest.SignedManifest + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest manifest.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifestByDigest.Raw, signedManifest.Raw) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching 
manifest with etag", resp, http.StatusNotModified)
+
+	// Get by digest with etag, gives 304
+	req, err = http.NewRequest("GET", manifestDigestURL, nil)
+	if err != nil {
+		t.Fatalf("Error constructing request: %s", err)
+	}
+	req.Header.Set("If-None-Match", etag)
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("Error executing request: %s", err)
+	}
+
+	checkResponse(t, "fetching manifest with etag", resp, http.StatusNotModified)
+
+	// Ensure that the tag is listed.
+	resp, err = http.Get(tagsURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting tags: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Check that the tag we pushed is now listed for the repository
+	checkResponse(t, "getting tags", resp, http.StatusOK)
+	dec = json.NewDecoder(resp.Body)
+
+	var tagsResponse tagsAPIResponse
+
+	if err := dec.Decode(&tagsResponse); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	if tagsResponse.Name != imageName {
+		t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName)
+	}
+
+	if len(tagsResponse.Tags) != 1 {
+		t.Fatalf("expected some tags in response: %v", tagsResponse.Tags)
+	}
+
+	if tagsResponse.Tags[0] != tag {
+		t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag)
+	}
+
+	return env, args
+}
+
+func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) {
+	imageName := args.imageName
+	dgst := args.dgst
+	signedManifest := args.signedManifest
+	manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String())
+	checkErr(t, err, "building manifest url")
+
+	// ---------------
+	// Delete by digest
+	resp, err := httpDelete(manifestDigestURL)
+	checkErr(t, err, "deleting manifest by digest")
+
+	checkResponse(t, "deleting manifest", resp, http.StatusAccepted)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length": []string{"0"},
+	})
+
+	// ---------------
+	// Attempt to fetch deleted manifest
+	resp, err = http.Get(manifestDigestURL)
+	checkErr(t, err, "fetching deleted manifest by digest")
+	defer resp.Body.Close()
+
+	checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound)
+
+	// ---------------
+	// Delete already deleted manifest by digest
+	resp, err = httpDelete(manifestDigestURL)
+	checkErr(t, err, "re-deleting manifest by digest")
+
+	checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound)
+
+	// --------------------
+	// Re-upload manifest by digest
+	resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest)
+	checkResponse(t, "putting signed manifest", resp, http.StatusCreated)
+	checkHeaders(t, resp, http.Header{
+		"Location":              []string{manifestDigestURL},
+		"Docker-Content-Digest": []string{dgst.String()},
+	})
+
+	// ---------------
+	// Attempt to fetch re-uploaded deleted digest
+	resp, err = http.Get(manifestDigestURL)
+	checkErr(t, err, "fetching re-uploaded manifest by digest")
+	defer resp.Body.Close()
+
+	checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Docker-Content-Digest": []string{dgst.String()},
+	})
+
+	// ---------------
+	// Attempt to delete an unknown manifest
+	unknownDigest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+	unknownManifestDigestURL, err := env.builder.BuildManifestURL(imageName, unknownDigest)
+	checkErr(t, err, "building unknown manifest url")
+
+	resp, err = httpDelete(unknownManifestDigestURL)
+	checkErr(t, err, "deleting unknown manifest by digest")
+	checkResponse(t, "deleting unknown manifest", resp, http.StatusNotFound)
+}
+
+type testEnv struct {
+	pk      libtrust.PrivateKey
+	ctx     context.Context
+	config  configuration.Configuration
+	app     *App
+	server  *httptest.Server
+	builder *v2.URLBuilder
+}
+
+func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+			"delete":   configuration.Parameters{"enabled": deleteEnabled},
+		},
+	}
+
+	return newTestEnvWithConfig(t, &config)
+}
+
+func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv {
+	ctx := context.Background()
+
+	app := NewApp(ctx, *config)
+	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
+	builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix)
+
+	if err != nil {
+		t.Fatalf("error creating url builder: %v", err)
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	return &testEnv{
+		pk:      pk,
+		ctx:     ctx,
+		config:  *config,
+		app:     app,
+		server:  server,
+		builder: builder,
+	}
+}
+
+func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response {
+	var body []byte
+	if sm, ok := v.(*manifest.SignedManifest); ok {
+		body = sm.Raw
+	} else {
+		var err error
+		body, err = json.MarshalIndent(v, "", "   ")
+		if err != nil {
+			t.Fatalf("unexpected error marshaling %v: %v", v, err)
+		}
+	}
+
+	req, err := http.NewRequest("PUT", url, bytes.NewReader(body))
+	if err != nil {
+		t.Fatalf("error creating request for %s: %v", msg, err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("error doing put request while %s: %v", msg, err)
+	}
+
+	return resp
+}
+
+func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location string, uuid string) {
+	layerUploadURL, err := ub.BuildBlobUploadURL(name)
+	if err != nil {
+		t.Fatalf("unexpected error building layer upload url: %v", err)
+	}
+
+	resp, err := http.Post(layerUploadURL, "", nil)
+	if err != nil {
+		t.Fatalf("unexpected error starting layer push: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, fmt.Sprintf("starting layer push %v", name), resp, http.StatusAccepted)
+
+	u, err := url.Parse(resp.Header.Get("Location"))
+	if err != nil {
+		t.Fatalf("error parsing location header: %v", err)
+	}
+
+	uuid = path.Base(u.Path)
+	checkHeaders(t, resp, http.Header{
+		"Location":           []string{"*"},
+		"Content-Length":     []string{"0"},
+		"Docker-Upload-UUID": []string{uuid},
+	})
+
+	return resp.Header.Get("Location"), uuid
+}
+
+// doPushLayer pushes the layer content, returning the raw response. If you're
+// only expecting a successful response, use pushLayer.
+func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) {
+	u, err := url.Parse(uploadURLBase)
+	if err != nil {
+		t.Fatalf("unexpected error parsing pushLayer url: %v", err)
+	}
+
+	u.RawQuery = url.Values{
+		"_state": u.Query()["_state"],
+		"digest": []string{dgst.String()},
+	}.Encode()
+
+	uploadURL := u.String()
+
+	// Just do a monolithic upload
+	req, err := http.NewRequest("PUT", uploadURL, body)
+	if err != nil {
+		t.Fatalf("unexpected error creating new request: %v", err)
+	}
+
+	return http.DefaultClient.Do(req)
+}
+
+// pushLayer pushes the layer content, returning the url on success.
+func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { + digester := digest.Canonical.New() + + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + sha256Dgst := digester.Digest() + + expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{sha256Dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, dgst digest.Digest) string { + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + expectedLayerURL, err := ub.BuildBlobURL(name, dgst) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + }.Encode() + + uploadURL := u.String() + + digester := digest.Canonical.New() + + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + + return resp, digester.Digest(), err +} + +func pushChunk(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { + resp, dgst, err := doPushChunk(t, uploadURLBase, body) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting chunk", resp, http.StatusAccepted) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + checkHeaders(t, resp, http.Header{ + "Range": []string{fmt.Sprintf("0-%d", length-1)}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location"), dgst +} + +func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { + if resp.StatusCode != expectedStatus { + t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) + maybeDumpResponse(t, resp) + + t.FailNow() + } +} + +// checkBodyHasErrorCodes ensures the body is an error body and has the +// expected error codes, returning the error structure, the json slice and a +// count of the errors by code. 
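+//
+// A hedged usage sketch (the response and error code here are illustrative,
+// not fixed by this helper):
+//
+//	_, body, counts := checkBodyHasErrorCodes(t, "unknown tags", resp, v2.ErrorCodeNameUnknown)
+//	if counts[v2.ErrorCodeNameUnknown] != 1 {
+//		t.Fatalf("expected exactly one NAME_UNKNOWN error: %s", string(body))
+//	}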
+func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) {
+	p, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("unexpected error reading body %s: %v", msg, err)
+	}
+
+	var errs errcode.Errors
+	if err := json.Unmarshal(p, &errs); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	if len(errs) == 0 {
+		t.Fatalf("expected errors in response")
+	}
+
+	// TODO(stevvooe): Shoot. The error setup is not working out. The content-
+	// type headers are being set after writing the status code.
+	// if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" {
+	// 	t.Fatalf("unexpected content type: %v != 'application/json'",
+	// 		resp.Header.Get("Content-Type"))
+	// }
+
+	expected := map[errcode.ErrorCode]struct{}{}
+	counts := map[errcode.ErrorCode]int{}
+
+	// Initialize map with zeros for expected
+	for _, code := range errorCodes {
+		expected[code] = struct{}{}
+		counts[code] = 0
+	}
+
+	for _, e := range errs {
+		err, ok := e.(errcode.ErrorCoder)
+		if !ok {
+			t.Fatalf("not an ErrorCoder: %#v", e)
+		}
+		if _, ok := expected[err.ErrorCode()]; !ok {
+			t.Fatalf("unexpected error code %v encountered during %s: %s", err.ErrorCode(), msg, string(p))
+		}
+		counts[err.ErrorCode()]++
+	}
+
+	// Ensure that counts of expected errors were all non-zero
+	for code := range expected {
+		if counts[code] == 0 {
+			t.Fatalf("expected error code %v not encountered during %s: %s", code, msg, string(p))
+		}
+	}
+
+	return errs, p, counts
+}
+
+func maybeDumpResponse(t *testing.T, resp *http.Response) {
+	if d, err := httputil.DumpResponse(resp, true); err != nil {
+		t.Logf("error dumping response: %v", err)
+	} else {
+		t.Logf("response:\n%s", string(d))
+	}
+}
+
+// checkHeaders checks that the response has at least the given headers. If
+// not, the test will fail. If a passed in header value is "*", any non-zero
+// value will suffice as a match.
+func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
+	for k, vs := range headers {
+		if resp.Header.Get(k) == "" {
+			t.Fatalf("response missing header %q", k)
+		}
+
+		for _, v := range vs {
+			if v == "*" {
+				// Just ensure there is some value.
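+				// (For example, the upload tests match the server-generated
+				// Location header with "*", since its exact value cannot be
+				// predicted.)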
+ if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { + continue + } + } + + for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { + if hv != v { + t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) + } + } + } + } +} + +func checkErr(t *testing.T, err error, msg string) { + if err != nil { + t.Fatalf("unexpected error %s: %v", msg, err) + } +} + +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { + unsignedManifest := &manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []manifest.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + signedManifest, err := manifest.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + payload, err := signedManifest.Payload() + checkErr(t, err, "getting manifest payload") + + dgst, err := digest.FromBytes(payload) + checkErr(t, err, "digesting manifest") + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go new file mode 100644 index 00000000..f60290d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go @@ -0,0 +1,800 @@ +package handlers + +import ( + cryptorand "crypto/rand" + "expvar" + "fmt" + "math/rand" + "net" + "net/http" + "os" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/notifications" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/garyburd/redigo/redis" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + 
+// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + context.Context + + Config configuration.Configuration + + router *mux.Router // main application router, configured with dispatchers + driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. + registry distribution.Namespace // registry is the primary registry backend for the app instance. + accessController auth.AccessController // main access controller for application + + // events contains notification related configuration. + events struct { + sink notifications.Sink + source notifications.SourceRecord + } + + redis *redis.Pool + + // true if this registry is configured as a pull through cache + isCache bool +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. +func NewApp(ctx context.Context, configuration configuration.Configuration) *App { + app := &App{ + Config: configuration, + Context: ctx, + router: v2.RouterWithPrefix(configuration.HTTP.Prefix), + isCache: configuration.Proxy.RemoteURL != "", + } + + app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) + + // Register the handler dispatchers. + app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) + app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + + var err error + app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. 
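+		// Until then, fail fast: without a storage driver the registry
+		// cannot serve any request, so surfacing the error at startup is
+		// the safest behavior.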
+ panic(err) + } + + purgeConfig := uploadPurgeDefaultConfig() + if mc, ok := configuration.Storage["maintenance"]; ok { + for k, v := range mc { + switch k { + case "uploadpurging": + purgeConfig = v.(map[interface{}]interface{}) + } + } + + } + + startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) + + app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) + if err != nil { + panic(err) + } + + app.configureSecret(&configuration) + app.configureEvents(&configuration) + app.configureRedis(&configuration) + app.configureLogHook(&configuration) + + // configure deletion + var deleteEnabled bool + if d, ok := configuration.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok = e.(bool); !ok { + deleteEnabled = false + } + } + } + + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := configuration.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } + } + + // configure storage caches + if cc, ok := configuration.Storage["cache"]; ok { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { + case "redis": + if app.redis == nil { + panic("redis configuration required to use for layerinfo cache") + } + app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled, app.isCache) + ctxu.GetLogger(app).Infof("using redis blob descriptor cache") + case "inmemory": + app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled, app.isCache) + ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") + default: + if v != "" { + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) + } + } + } + + if app.registry == nil { + // configure the registry if no cache section is available. + app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled, app.isCache) + } + + app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) + if err != nil { + panic(err) + } + + authType := configuration.Auth.Type() + + if authType != "" { + accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) + if err != nil { + panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + } + app.accessController = accessController + ctxu.GetLogger(app).Debugf("configured %q access controller", authType) + } + + // configure as a pull through cache + if configuration.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) + if err != nil { + panic(err.Error()) + } + app.isCache = true + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) + } + + return app +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. 
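+//
+// A minimal sketch of a dispatcher as wired above (the handler body is
+// illustrative only):
+//
+//	app.register(v2.RouteNameTags, func(ctx *Context, r *http.Request) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			w.WriteHeader(http.StatusOK)
+//		})
+//	})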
+func (app *App) register(routeName string, dispatch dispatchFunc) { + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) +} + +// configureEvents prepares the event sink for action. +func (app *App) configureEvents(configuration *configuration.Configuration) { + // Configure all of the endpoint sinks. + var sinks []notifications.Sink + for _, endpoint := range configuration.Notifications.Endpoints { + if endpoint.Disabled { + ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) + continue + } + + ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) + endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ + Timeout: endpoint.Timeout, + Threshold: endpoint.Threshold, + Backoff: endpoint.Backoff, + Headers: endpoint.Headers, + }) + + sinks = append(sinks, endpoint) + } + + // NOTE(stevvooe): Moving to a new queueing implementation is as easy as + // replacing broadcaster with a rabbitmq implementation. It's recommended + // that the registry instances also act as the workers to keep deployment + // simple. + app.events.sink = notifications.NewBroadcaster(sinks...) + + // Populate registry event source + hostname, err := os.Hostname() + if err != nil { + hostname = configuration.HTTP.Addr + } else { + // try to pick the port off the config + _, port, err := net.SplitHostPort(configuration.HTTP.Addr) + if err == nil { + hostname = net.JoinHostPort(hostname, port) + } + } + + app.events.source = notifications.SourceRecord{ + Addr: hostname, + InstanceID: ctxu.GetStringValue(app, "instance.id"), + } +} + +func (app *App) configureRedis(configuration *configuration.Configuration) { + if configuration.Redis.Addr == "" { + ctxu.GetLogger(app).Infof("redis not configured") + return + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. 
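+			// Stash the dial start time on the context so done() below can
+			// report the connect duration via ctxu.Since.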
+			ctx := context.WithValue(app, "redis.connect.startedat", time.Now())
+
+			done := func(err error) {
+				logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration",
+					ctxu.Since(ctx, "redis.connect.startedat"))
+				if err != nil {
+					logger.Errorf("redis: error connecting: %v", err)
+				} else {
+					logger.Infof("redis: connect %v", configuration.Redis.Addr)
+				}
+			}
+
+			conn, err := redis.DialTimeout("tcp",
+				configuration.Redis.Addr,
+				configuration.Redis.DialTimeout,
+				configuration.Redis.ReadTimeout,
+				configuration.Redis.WriteTimeout)
+			if err != nil {
+				ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v",
+					configuration.Redis.Addr, err)
+				done(err)
+				return nil, err
+			}
+
+			// authorize the connection
+			if configuration.Redis.Password != "" {
+				if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil {
+					defer conn.Close()
+					done(err)
+					return nil, err
+				}
+			}
+
+			// select the database to use
+			if configuration.Redis.DB != 0 {
+				if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil {
+					defer conn.Close()
+					done(err)
+					return nil, err
+				}
+			}
+
+			done(nil)
+			return conn, nil
+		},
+		MaxIdle:     configuration.Redis.Pool.MaxIdle,
+		MaxActive:   configuration.Redis.Pool.MaxActive,
+		IdleTimeout: configuration.Redis.Pool.IdleTimeout,
+		TestOnBorrow: func(c redis.Conn, t time.Time) error {
+			// TODO(stevvooe): We can probably do something more interesting
+			// here with the health package.
+			_, err := c.Do("PING")
+			return err
+		},
+		Wait: false, // if a connection is not available, proceed without cache.
+	}
+
+	app.redis = pool
+
+	// setup expvar
+	registry := expvar.Get("registry")
+	if registry == nil {
+		registry = expvar.NewMap("registry")
+	}
+
+	registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} {
+		return map[string]interface{}{
+			"Config": configuration.Redis,
+			"Active": app.redis.ActiveCount(),
+		}
+	}))
+}
+
+// configureLogHook prepares logging hook parameters.
+func (app *App) configureLogHook(configuration *configuration.Configuration) {
+	entry, ok := ctxu.GetLogger(app).(*log.Entry)
+	if !ok {
+		// somehow, we are not using logrus
+		return
+	}
+
+	logger := entry.Logger
+
+	for _, configHook := range configuration.Log.Hooks {
+		if !configHook.Disabled {
+			switch configHook.Type {
+			case "mail":
+				hook := &logHook{}
+				hook.LevelsParam = configHook.Levels
+				hook.Mail = &mailer{
+					Addr:     configHook.MailOptions.SMTP.Addr,
+					Username: configHook.MailOptions.SMTP.Username,
+					Password: configHook.MailOptions.SMTP.Password,
+					Insecure: configHook.MailOptions.SMTP.Insecure,
+					From:     configHook.MailOptions.From,
+					To:       configHook.MailOptions.To,
+				}
+				logger.Hooks.Add(hook)
+			default:
+			}
+		}
+	}
+}
+
+// configureSecret creates a random secret if a secret wasn't included in the
+// configuration.
+func (app *App) configureSecret(configuration *configuration.Configuration) {
+	if configuration.HTTP.Secret == "" {
+		var secretBytes [randomSecretSize]byte
+		if _, err := cryptorand.Read(secretBytes[:]); err != nil {
+			panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err))
+		}
+		configuration.HTTP.Secret = string(secretBytes[:])
+		ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. 
To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() // ensure that request body is always closed. + + // Instantiate an http context here so we can track the error codes + // returned by the request router. + ctx := defaultContextManager.context(app, w, r) + + defer func() { + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + ctxu.GetResponseLogger(ctx).Infof("response completed") + } + }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } + + // Set a header with the Docker Distribution API Version for all responses. + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") + app.router.ServeHTTP(w, r) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. +func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + context := app.context(w, r) + + if err := app.authorized(w, r, context); err != nil { + ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) + return + } + + // Add username to request logging + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + + if app.nameRequired(r) { + repository, err := app.registry.Repository(context, getName(context)) + + if err != nil { + ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) + + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) + case distribution.ErrRepositoryNameInvalid: + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + } + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + + // assign and decorate the authorized repository with an event bridge. + context.Repository = notifications.Listen( + repository, + app.eventBridge(context, r)) + + context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) + if err != nil { + ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + } + + dispatch(context, r).ServeHTTP(w, r) + // Automated error response handling here. 
Handlers may return their
+		// own errors if they need different behavior (such as range errors
+		// for layer upload).
+		if context.Errors.Len() > 0 {
+			if err := errcode.ServeJSON(w, context.Errors); err != nil {
+				ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
+			}
+
+			app.logError(context, context.Errors)
+		}
+	})
+}
+
+func (app *App) logError(context context.Context, errors errcode.Errors) {
+	for _, e1 := range errors {
+		var c ctxu.Context
+
+		switch e1.(type) {
+		case errcode.Error:
+			e, _ := e1.(errcode.Error)
+			c = ctxu.WithValue(context, "err.code", e.Code)
+			c = ctxu.WithValue(c, "err.message", e.Code.Message())
+			c = ctxu.WithValue(c, "err.detail", e.Detail)
+		case errcode.ErrorCode:
+			e, _ := e1.(errcode.ErrorCode)
+			c = ctxu.WithValue(context, "err.code", e)
+			c = ctxu.WithValue(c, "err.message", e.Message())
+		default:
+			// just a normal go 'error'
+			c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown)
+			c = ctxu.WithValue(c, "err.message", e1.Error())
+		}
+
+		c = ctxu.WithLogger(c, ctxu.GetLogger(c,
+			"err.code",
+			"err.message",
+			"err.detail"))
+		ctxu.GetResponseLogger(c).Errorf("response completed with error")
+	}
+}
+
+// context constructs the context object for the application. This should
+// only be called once per request.
+func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
+	ctx := defaultContextManager.context(app, w, r)
+	ctx = ctxu.WithVars(ctx, r)
+	ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx,
+		"vars.name",
+		"vars.reference",
+		"vars.digest",
+		"vars.uuid"))
+
+	context := &Context{
+		App:        app,
+		Context:    ctx,
+		urlBuilder: v2.NewURLBuilderFromRequest(r),
+	}
+
+	return context
+}
+
+// authorized checks if the request can proceed with access to the requested
+// repository. If it succeeds, the context may access the requested
+// repository. An error will be returned if access is not available.
+func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
+	ctxu.GetLogger(context).Debug("authorizing request")
+	repo := getName(context)
+
+	if app.accessController == nil {
+		return nil // access controller is not enabled.
+	}
+
+	var accessRecords []auth.Access
+
+	if repo != "" {
+		accessRecords = appendAccessRecords(accessRecords, r.Method, repo)
+	} else {
+		// Only allow the name not to be set on the base route.
+		if app.nameRequired(r) {
+			// For this to be properly secured, repo must always be set for a
+			// resource that may make a modification. The only condition under
+			// which name is not set and we still allow access is when the
+			// base route is accessed. This section prevents us from making
+			// that mistake elsewhere in the code, allowing any operation to
+			// proceed.
+			if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized); err != nil {
+				ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
+			}
+			return fmt.Errorf("forbidden: no repository name")
+		}
+		accessRecords = appendCatalogAccessRecord(accessRecords, r)
+	}
+
+	ctx, err := app.accessController.Authorized(context.Context, accessRecords...)
+	if err != nil {
+		switch err := err.(type) {
+		case auth.Challenge:
+			// Add the appropriate WWW-Auth header
+			err.SetHeaders(w)
+
+			if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil {
+				ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
+			}
+		default:
+			// This condition is a potential security problem either in
+			// the configuration or whatever is backing the access
+			// controller. Just return a bad request with no information
+			// to avoid exposure. The request should not proceed.
+			ctxu.GetLogger(context).Errorf("error checking authorization: %v", err)
+			w.WriteHeader(http.StatusBadRequest)
+		}
+
+		return err
+	}
+
+	// TODO(stevvooe): This pattern needs to be cleaned up a bit. One context
+	// should be replaced by another, rather than replacing the context on a
+	// mutable object.
+	context.Context = ctx
+	return nil
+}
+
+// eventBridge returns a bridge for the current request, configured with the
+// correct actor and source.
+func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener {
+	actor := notifications.ActorRecord{
+		Name: getUserName(ctx, r),
+	}
+	request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r)
+
+	return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink)
+}
+
+// nameRequired returns true if the route requires a name.
+func (app *App) nameRequired(r *http.Request) bool {
+	route := mux.CurrentRoute(r)
+	if route == nil {
+		return true
+	}
+	routeName := route.GetName()
+	return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog
+}
+
+// apiBase implements a simple yes-man for doing overall checks against the
+// api. This can support auth roundtrips to support docker login.
+func apiBase(w http.ResponseWriter, r *http.Request) {
+	const emptyJSON = "{}"
+	// Provide a simple /v2/ 200 OK response with empty json response.
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON)))
+
+	fmt.Fprint(w, emptyJSON)
+}
+
+// appendAccessRecords checks the method and adds the appropriate Access records to the records list.
+func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access {
+	resource := auth.Resource{
+		Type: "repository",
+		Name: repo,
+	}
+
+	switch method {
+	case "GET", "HEAD":
+		records = append(records,
+			auth.Access{
+				Resource: resource,
+				Action:   "pull",
+			})
+	case "POST", "PUT", "PATCH":
+		records = append(records,
+			auth.Access{
+				Resource: resource,
+				Action:   "pull",
+			},
+			auth.Access{
+				Resource: resource,
+				Action:   "push",
+			})
+	case "DELETE":
+		// DELETE access requires full admin rights, which is represented
+		// as "*". This may not be ideal.
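+		// For a hypothetical repository "foo/bar", this case yields a
+		// single record:
+		//
+		//	auth.Access{
+		//		Resource: auth.Resource{Type: "repository", Name: "foo/bar"},
+		//		Action:   "*",
+		//	}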
+		records = append(records,
+			auth.Access{
+				Resource: resource,
+				Action:   "*",
+			})
+	}
+	return records
+}
+
+// appendCatalogAccessRecord adds the access record for the catalog if it's
+// our current route.
+func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access {
+	route := mux.CurrentRoute(r)
+	routeName := route.GetName()
+
+	if routeName == v2.RouteNameCatalog {
+		resource := auth.Resource{
+			Type: "registry",
+			Name: "catalog",
+		}
+
+		accessRecords = append(accessRecords,
+			auth.Access{
+				Resource: resource,
+				Action:   "*",
+			})
+	}
+	return accessRecords
+}
+
+// applyRegistryMiddleware wraps a registry instance with the configured middlewares
+func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) {
+	for _, mw := range middlewares {
+		rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry)
+		if err != nil {
+			return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err)
+		}
+		registry = rmw
+	}
+	return registry, nil
+}
+
+// applyRepoMiddleware wraps a repository with the configured middlewares
+func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) {
+	for _, mw := range middlewares {
+		rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository)
+		if err != nil {
+			return nil, err
+		}
+		repository = rmw
+	}
+	return repository, nil
+}
+
+// applyStorageMiddleware wraps a storage driver with the configured middlewares
+func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) {
+	for _, mw := range middlewares {
+		smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver)
+		if err != nil {
+			return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err)
+		}
+		driver = smw
+	}
+	return driver, nil
+}
+
+// uploadPurgeDefaultConfig provides a default configuration for upload
+// purging to be used in the absence of configuration in the
+// configuration file
+func uploadPurgeDefaultConfig() map[interface{}]interface{} {
+	config := map[interface{}]interface{}{}
+	config["enabled"] = true
+	config["age"] = "168h"
+	config["interval"] = "24h"
+	config["dryrun"] = false
+	return config
+}
+
+func badPurgeUploadConfig(reason string) {
+	panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason))
+}
+
+// startUploadPurger schedules a goroutine which will periodically
+// check upload directories for old files and delete them
+func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) {
+	if config["enabled"] == false {
+		return
+	}
+
+	var purgeAgeDuration time.Duration
+	var err error
+	purgeAge, ok := config["age"]
+	if ok {
+		ageStr, ok := purgeAge.(string)
+		if !ok {
+			badPurgeUploadConfig("age is not a string")
+		}
+		purgeAgeDuration, err = time.ParseDuration(ageStr)
+		if err != nil {
+			badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error()))
+		}
+	} else {
+		badPurgeUploadConfig("age missing")
+	}
+
+	var intervalDuration time.Duration
+	interval, ok := config["interval"]
+	if ok {
+		intervalStr, ok := interval.(string)
+		if !ok {
+			badPurgeUploadConfig("interval is not a string")
+		}
+
+		intervalDuration, err = time.ParseDuration(intervalStr)
+		if err != nil {
+			badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error()))
+		}
+	} else {
+		badPurgeUploadConfig("interval missing")
+	}
+
+	var dryRunBool bool
+	dryRun, ok := config["dryrun"]
+	if ok {
+		dryRunBool, ok = dryRun.(bool)
+		if !ok {
+			badPurgeUploadConfig("cannot parse dryrun")
+		}
+	} else {
+		badPurgeUploadConfig("dryrun missing")
+	}
+
+	go func() {
+		rand.Seed(time.Now().Unix())
+		jitter := time.Duration(rand.Int()%60) * time.Minute
+		log.Infof("Starting upload purge in %s", jitter)
+		time.Sleep(jitter)
+
+		for {
+			storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool)
+			log.Infof("Next upload purge in %s", intervalDuration)
+			time.Sleep(intervalDuration)
+		}
+	}()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go
new file mode 100644
index 00000000..6f597527
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go
@@ -0,0 +1,277 @@
+package handlers
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution/configuration"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/auth"
+	_ "github.com/docker/distribution/registry/auth/silly"
+	"github.com/docker/distribution/registry/storage"
+	memorycache "github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+	"golang.org/x/net/context"
+)
+
+// TestAppDispatcher builds an application with a test dispatcher and ensures
+// that requests are properly dispatched and the handlers are constructed.
+// This only tests the dispatch mechanism. The underlying dispatchers must be
+// tested individually.
+func TestAppDispatcher(t *testing.T) { + driver := inmemory.New() + ctx := context.Background() + app := &App{ + Config: configuration.Configuration{}, + Context: ctx, + router: v2.Router(), + driver: driver, + registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true, false), + } + server := httptest.NewServer(app) + router := v2.Router() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { + return func(ctx *Context, r *http.Request) http.Handler { + // Always checks the same name context + if ctx.Repository.Name() != getName(ctx) { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") + } + + // Check that we have all that is expected + for expectedK, expectedV := range expectedVars { + if ctx.Value(expectedK) != expectedV { + t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) + } + } + + // Check that we only have variables that are expected + for k, v := range ctx.Value("vars").(map[string]string) { + _, ok := expectedVars[k] + + if !ok { // name is checked on context + // We have an unexpected key, fail + t.Fatalf("unexpected key %q in vars with value %q", k, v) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + } + } + + // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string + unflatten := func(vars []string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(vars)-1; i = i + 2 { + m[vars[i]] = vars[i+1] + } + + return m + } + + for _, testcase := range []struct { + endpoint string + vars []string + }{ + { + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "foo/bar", + "reference", "sometag", + }, + }, + { + endpoint: v2.RouteNameTags, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "foo/bar", + "digest", "tarsum.v1+bogus:abcdef0123456789", + }, + }, + { + endpoint: v2.RouteNameBlobUpload, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUploadChunk, + vars: []string{ + "name", "foo/bar", + "uuid", "theuuid", + }, + }, + } { + app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) + route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) + u, err := route.URL(testcase.vars...) + + if err != nil { + t.Fatal(err) + } + + resp, err := http.Get(u.String()) + + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) + } + } +} + +// TestNewApp covers the creation of an application via NewApp with a +// configuration. +func TestNewApp(t *testing.T) { + ctx := context.Background() + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": nil, + }, + Auth: configuration.Auth{ + // For now, we simply test that new auth results in a viable + // application. + "silly": { + "realm": "realm-test", + "service": "service-test", + }, + }, + } + + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. We might want to tweak this + // behavior. 
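+	// The GET below also exercises the configured "silly" auth challenge end
+	// to end: an unauthenticated request against the base URL must come back
+	// as a 401 carrying a WWW-Authenticate header built from the realm and
+	// service values configured above.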
+	app := NewApp(ctx, config)
+
+	server := httptest.NewServer(app)
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+	if err != nil {
+		t.Fatalf("error creating urlbuilder: %v", err)
+	}
+
+	baseURL, err := builder.BuildBaseURL()
+	if err != nil {
+		t.Fatalf("error creating baseURL: %v", err)
+	}
+
+	// TODO(stevvooe): The rest of this test might belong in the API tests.
+
+	// Just hit the app and make sure we get a 401 Unauthorized error.
+	resp, err := http.Get(baseURL)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected status code during request: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" {
+		t.Fatalf("unexpected content-type: %v != %v", resp.Header.Get("Content-Type"), "application/json; charset=utf-8")
+	}
+
+	expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\""
+	if e, a := expectedAuthHeader, resp.Header.Get("WWW-Authenticate"); e != a {
+		t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a)
+	}
+
+	var errs errcode.Errors
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&errs); err != nil {
+		t.Fatalf("error decoding error response: %v", err)
+	}
+
+	err2, ok := errs[0].(errcode.ErrorCoder)
+	if !ok {
+		t.Fatalf("not an ErrorCoder: %#v", errs[0])
+	}
+	if err2.ErrorCode() != v2.ErrorCodeUnauthorized {
+		t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), v2.ErrorCodeUnauthorized)
+	}
+}
+
+// Test the access record accumulator
+func TestAppendAccessRecords(t *testing.T) {
+	repo := "testRepo"
+
+	expectedResource := auth.Resource{
+		Type: "repository",
+		Name: repo,
+	}
+
+	expectedPullRecord := auth.Access{
+		Resource: expectedResource,
+		Action:   "pull",
+	}
+	expectedPushRecord := auth.Access{
+		Resource: expectedResource,
+		Action:   "push",
+	}
+	expectedAllRecord := auth.Access{
+		Resource: expectedResource,
+		Action:   "*",
+	}
+
+	records := []auth.Access{}
+	result := appendAccessRecords(records, "GET", repo)
+	expectedResult := []auth.Access{expectedPullRecord}
+	if ok := reflect.DeepEqual(result, expectedResult); !ok {
+		t.Fatalf("Actual access record differs from expected")
+	}
+
+	records = []auth.Access{}
+	result = appendAccessRecords(records, "HEAD", repo)
+	expectedResult = []auth.Access{expectedPullRecord}
+	if ok := reflect.DeepEqual(result, expectedResult); !ok {
+		t.Fatalf("Actual access record differs from expected")
+	}
+
+	records = []auth.Access{}
+	result = appendAccessRecords(records, "POST", repo)
+	expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
+	if ok := reflect.DeepEqual(result, expectedResult); !ok {
+		t.Fatalf("Actual access record differs from expected")
+	}
+
+	records = []auth.Access{}
+	result = appendAccessRecords(records, "PUT", repo)
+	expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
+	if ok := reflect.DeepEqual(result, expectedResult); !ok {
+		t.Fatalf("Actual access record differs from expected")
+	}
+
+	records = []auth.Access{}
+	result = appendAccessRecords(records, "PATCH", repo)
+	expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
+	if ok := reflect.DeepEqual(result, expectedResult); !ok {
+		t.Fatalf("Actual access record differs from expected")
+	}
+
+	records = []auth.Access{}
+	result = appendAccessRecords(records, "DELETE", repo)
+	expectedResult = []auth.Access{expectedAllRecord}
+	if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected") + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go new file mode 100644 index 00000000..8727a3cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go @@ -0,0 +1,11 @@ +// +build go1.4 + +package handlers + +import ( + "net/http" +) + +func basicAuth(r *http.Request) (username, password string, ok bool) { + return r.BasicAuth() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go new file mode 100644 index 00000000..6cf10a25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go @@ -0,0 +1,41 @@ +// +build !go1.4 + +package handlers + +import ( + "encoding/base64" + "net/http" + "strings" +) + +// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we +// can compile on go1.3 and earlier. + +// BasicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func basicAuth(r *http.Request) (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + return parseBasicAuth(auth) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). +func parseBasicAuth(auth string) (username, password string, ok bool) { + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go new file mode 100644 index 00000000..b7c06ea2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go @@ -0,0 +1,93 @@ +package handlers + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobDispatcher uses the request context to build a blobHandler. +func blobDispatcher(ctx *Context, r *http.Request) http.Handler { + dgst, err := getDigest(ctx) + if err != nil { + + if err == errDigestNotAvailable { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + blobHandler := &blobHandler{ + Context: ctx, + Digest: dgst, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), + "DELETE": http.HandlerFunc(blobHandler.DeleteBlob), + } +} + +// blobHandler serves http blob requests. 
+type blobHandler struct { + *Context + + Digest digest.Digest +} + +// GetBlob fetches the binary data from backend storage returns it in the +// response. +func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("GetBlob") + blobs := bh.Repository.Blobs(bh) + desc, err := blobs.Stat(bh, bh.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) + } else { + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { + context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrBlobUnknown: + w.WriteHeader(http.StatusNotFound) + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + case distribution.ErrUnsupported: + w.WriteHeader(http.StatusMethodNotAllowed) + bh.Errors = append(bh.Errors, v2.ErrorCodeUnsupported) + default: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown) + } + return + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go new file mode 100644 index 00000000..1d1c1009 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go @@ -0,0 +1,327 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/url" + "os" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobUploadDispatcher constructs and returns the blob upload handler for the +// given request context. 
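+// When the request names an existing upload UUID, the handler first restores
+// the upload state from the hmac-signed "_state" form value, verifies that
+// the repository name and UUID match, resumes the blob writer and seeks it to
+// the recorded offset before dispatching to the method handlers below.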
+func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
+	buh := &blobUploadHandler{
+		Context: ctx,
+		UUID:    getUploadUUID(ctx),
+	}
+
+	handler := http.Handler(handlers.MethodHandler{
+		"POST":   http.HandlerFunc(buh.StartBlobUpload),
+		"GET":    http.HandlerFunc(buh.GetUploadStatus),
+		"HEAD":   http.HandlerFunc(buh.GetUploadStatus),
+		"PATCH":  http.HandlerFunc(buh.PatchBlobData),
+		"PUT":    http.HandlerFunc(buh.PutBlobUploadComplete),
+		"DELETE": http.HandlerFunc(buh.CancelBlobUpload),
+	})
+
+	if buh.UUID != "" {
+		state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
+		if err != nil {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
+				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+			})
+		}
+		buh.State = state
+
+		if state.Name != ctx.Repository.Name() {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name())
+				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+			})
+		}
+
+		if state.UUID != buh.UUID {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
+				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+			})
+		}
+
+		blobs := ctx.Repository.Blobs(buh)
+		upload, err := blobs.Resume(buh, buh.UUID)
+		if err != nil {
+			ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
+			if err == distribution.ErrBlobUploadUnknown {
+				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err))
+				})
+			}
+
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+			})
+		}
+		buh.Upload = upload
+
+		if state.Offset > 0 {
+			// Seek the blob upload to the correct spot if it's non-zero.
+			// These error conditions should be rare and demonstrate real
+			// problems. We basically cancel the upload and tell the client to
+			// start over.
+			if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil {
+				defer upload.Close()
+				ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err)
+				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+					upload.Cancel(buh)
+				})
+			} else if nn != buh.State.Offset {
+				defer upload.Close()
+				ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset)
+				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+					upload.Cancel(buh)
+				})
+			}
+		}
+
+		handler = closeResources(handler, buh.Upload)
+	}
+
+	return handler
+}
+
+// blobUploadHandler handles the http blob upload process.
+type blobUploadHandler struct {
+	*Context
+
+	// UUID identifies the upload instance for the current request. Using UUID
+	// to key blob writers since this implementation uses UUIDs.
+	UUID string
+
+	Upload distribution.BlobWriter
+
+	State blobUploadState
+}
+
+// StartBlobUpload begins the blob upload process and allocates a server-side
+// blob writer session.
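+// On success it responds 202 Accepted with a Location header pointing at the
+// upload URL and a Docker-Upload-UUID header identifying the session.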
+func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + blobs := buh.Repository.Blobs(buh) + upload, err := blobs.Create(buh) + if err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + buh.Upload = upload + defer buh.Upload.Close() + + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. +func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) + return + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) + return + } + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ + Digest: dgst, + + // TODO(stevvooe): This isn't wildly important yet, but we should + // really set the length and mediatype. 
For now, we can let the
+		// backend take care of this.
+	})
+
+	if err != nil {
+		switch err := err.(type) {
+		case distribution.ErrBlobInvalidDigest:
+			buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+		default:
+			switch err {
+			case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported:
+				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+			default:
+				ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err)
+				buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+			}
+		}
+
+		// Clean up the backend blob data if there was an error.
+		if err := buh.Upload.Cancel(buh); err != nil {
+			// If the cleanup fails, all we can do is observe and report.
+			ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err)
+		}
+
+		return
+	}
+
+	// Build our canonical blob url
+	blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest)
+	if err != nil {
+		buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		return
+	}
+
+	w.Header().Set("Location", blobURL)
+	w.Header().Set("Content-Length", "0")
+	w.Header().Set("Docker-Content-Digest", desc.Digest.String())
+	w.WriteHeader(http.StatusCreated)
+}
+
+// CancelBlobUpload cancels an in-progress upload of a blob.
+func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) {
+	if buh.Upload == nil {
+		buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
+		return
+	}
+
+	w.Header().Set("Docker-Upload-UUID", buh.UUID)
+	if err := buh.Upload.Cancel(buh); err != nil {
+		ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err)
+		buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// blobUploadResponse provides a standard response for uploading blobs and
+// chunk responses. This sets the correct headers but the response status is
+// left to the caller. The fresh argument is used to ensure that new blob
+// uploads always start at a 0 offset. This allows disabling resumable push by
+// always returning a 0 offset on check status.
+func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {
+
+	var offset int64
+	if !fresh {
+		var err error
+		offset, err = buh.Upload.Seek(0, os.SEEK_CUR)
+		if err != nil {
+			ctxu.GetLogger(buh).Errorf("unable to get current offset of blob upload: %v", err)
+			return err
+		}
+	}
+
+	// TODO(stevvooe): Need a better way to manage the upload state automatically.
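+	// The state below is serialized, hmac-signed with the configured HTTP
+	// secret and round-tripped to the client in the "_state" query parameter
+	// of the upload URL, so no per-upload state has to be kept on the server
+	// between chunk requests.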
+ buh.State.Name = buh.Repository.Name() + buh.State.UUID = buh.Upload.ID() + buh.State.Offset = offset + buh.State.StartedAt = buh.Upload.StartedAt() + + token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) + return err + } + + uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( + buh.Repository.Name(), buh.Upload.ID(), + url.Values{ + "_state": []string{token}, + }) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload url: %s", err) + return err + } + + endRange := offset + if endRange > 0 { + endRange = endRange - 1 + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go new file mode 100644 index 00000000..6ec1fe55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go @@ -0,0 +1,95 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos := make([]string, maxEntries) + + filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) + if err == io.EOF { + moreEntries = false + } else if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + // Add a link header if there are more entries to retrieve + if moreEntries { + lastEntry = repos[len(repos)-1] + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + + enc := json.NewEncoder(w) + if err := enc.Encode(catalogAPIResponse{ + Repositories: repos[0:filled], + }); err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// Use the original URL from the request to create a new URL for +// the link header +func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { + calledURL, err := url.Parse(origURL) + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("n", strconv.Itoa(maxEntries)) + v.Add("last", lastEntry) + + calledURL.RawQuery = v.Encode() + + calledURL.Fragment = "" + urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) + + return urlStr, nil +} diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go
new file mode 100644
index 00000000..85a17123
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go
@@ -0,0 +1,151 @@
+package handlers
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+
+	"github.com/docker/distribution"
+	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"golang.org/x/net/context"
+)
+
+// Context should contain the request specific context for use across
+// handlers. Resources that don't need to be shared across handlers should not
+// be on this object.
+type Context struct {
+	// App points to the application structure that created this context.
+	*App
+	context.Context
+
+	// Repository is the repository for the current request. All requests
+	// should be scoped to a single repository. This field may be nil.
+	Repository distribution.Repository
+
+	// Errors is a collection of errors encountered during the request to be
+	// returned to the client API. If errors are added to the collection, the
+	// handler *must not* start the response via http.ResponseWriter.
+	Errors errcode.Errors
+
+	urlBuilder *v2.URLBuilder
+
+	// TODO(stevvooe): The goal is to completely factor this context and
+	// dispatching out of the web application. Ideally, we should lean on
+	// context.Context for injection of these resources.
+}
+
+// Value overrides context.Context.Value to ensure that calls are routed to
+// the correct context.
+func (ctx *Context) Value(key interface{}) interface{} {
+	return ctx.Context.Value(key)
+}
+
+func getName(ctx context.Context) (name string) {
+	return ctxu.GetStringValue(ctx, "vars.name")
+}
+
+func getReference(ctx context.Context) (reference string) {
+	return ctxu.GetStringValue(ctx, "vars.reference")
+}
+
+var errDigestNotAvailable = fmt.Errorf("digest not available in context")
+
+func getDigest(ctx context.Context) (dgst digest.Digest, err error) {
+	dgstStr := ctxu.GetStringValue(ctx, "vars.digest")
+
+	if dgstStr == "" {
+		ctxu.GetLogger(ctx).Errorf("digest not available")
+		return "", errDigestNotAvailable
+	}
+
+	d, err := digest.ParseDigest(dgstStr)
+	if err != nil {
+		ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err)
+		return "", err
+	}
+
+	return d, nil
+}
+
+func getUploadUUID(ctx context.Context) (uuid string) {
+	return ctxu.GetStringValue(ctx, "vars.uuid")
+}
+
+// getUserName attempts to resolve a username from the context and request. If
+// a username cannot be resolved, the empty string is returned.
+func getUserName(ctx context.Context, r *http.Request) string {
+	username := ctxu.GetStringValue(ctx, "auth.user.name")
+
+	// Fallback to request user with basic auth
+	if username == "" {
+		var ok bool
+		uname, _, ok := basicAuth(r)
+		if ok {
+			username = uname
+		}
+	}
+
+	return username
+}
+
+// contextManager allows us to associate net/context.Context instances with a
+// request, based on the memory identity of http.Request. This prepares http-
+// level context, which is not application specific. If this is called,
+// (*contextManager).release must be called on the context when the request is
+// completed.
+//
+// Providing this circumvents a lot of necessity for dispatchers with the
+// benefit of instantiating the request context much earlier.
+//
+// TODO(stevvooe): Consider making this facility a part of the context package.
+type contextManager struct {
+	contexts map[*http.Request]context.Context
+	mu       sync.Mutex
+}
+
+// defaultContextManager is just a global instance to register request contexts.
+var defaultContextManager = newContextManager()
+
+func newContextManager() *contextManager {
+	return &contextManager{
+		contexts: make(map[*http.Request]context.Context),
+	}
+}
+
+// context either returns a new context or looks it up in the manager.
+func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context {
+	cm.mu.Lock()
+	defer cm.mu.Unlock()
+
+	ctx, ok := cm.contexts[r]
+	if ok {
+		return ctx
+	}
+
+	if parent == nil {
+		parent = ctxu.Background()
+	}
+
+	ctx = ctxu.WithRequest(parent, r)
+	ctx, w = ctxu.WithResponseWriter(ctx, w)
+	ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
+	cm.contexts[r] = ctx
+
+	return ctx
+}
+
+// release frees any resources associated with the request.
+func (cm *contextManager) release(ctx context.Context) {
+	cm.mu.Lock()
+	defer cm.mu.Unlock()
+
+	r, err := ctxu.GetRequest(ctx)
+	if err != nil {
+		ctxu.GetLogger(ctx).Errorf("no request found in context during release")
+		return
+	}
+	delete(cm.contexts, r)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go
new file mode 100644
index 00000000..1f9a8ee1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go
@@ -0,0 +1,62 @@
+package handlers
+
+import (
+	"errors"
+	"io"
+	"net/http"
+
+	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+// closeResources closes all the provided resources after running the target
+// handler.
+func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		for _, closer := range closers {
+			defer closer.Close()
+		}
+		handler.ServeHTTP(w, r)
+	})
+}
+
+// copyFullPayload copies the payload of an HTTP request to destWriter. If it
+// receives less content than expected, and the client disconnected during the
+// upload, it avoids sending a 400 error to keep the logs cleaner.
+func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
+	// Get a channel that tells us if the client disconnects
+	var clientClosed <-chan bool
+	if notifier, ok := responseWriter.(http.CloseNotifier); ok {
+		clientClosed = notifier.CloseNotify()
+	} else {
+		panic("the ResponseWriter does not implement CloseNotifier")
+	}
+
+	// Read in the data, if any.
+	copied, err := io.Copy(destWriter, r.Body)
+	if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
+		// Didn't receive as much content as expected. Did the client
+		// disconnect during the request? If so, avoid returning a 400
+		// error to keep the logs cleaner.
+		select {
+		case <-clientClosed:
+			// Set the response code to "499 Client Closed Request"
+			// Even though the connection has already been closed,
+			// this causes the logger to pick up a 499 error
+			// instead of showing 0 for the HTTP status.
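+			// 499 is the non-standard "Client Closed Request" status
+			// popularized by nginx; the client never sees it (the connection
+			// is already gone), but it keeps the access logs meaningful.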
+			responseWriter.WriteHeader(499)
+
+			ctxu.GetLogger(context).Error("client disconnected during " + action)
+			return errors.New("client disconnected")
+		default:
+		}
+	}
+
+	if err != nil {
+		ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err)
+		*errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err))
+		return err
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go
new file mode 100644
index 00000000..1725d240
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go
@@ -0,0 +1,72 @@
+package handlers
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// blobUploadState captures the serializable state of the blob upload.
+type blobUploadState struct {
+	// name is the primary repository under which the blob will be linked.
+	Name string
+
+	// UUID identifies the upload.
+	UUID string
+
+	// offset contains the current progress of the upload.
+	Offset int64
+
+	// StartedAt is the original start time of the upload.
+	StartedAt time.Time
+}
+
+type hmacKey string
+
+// unpackUploadState unpacks and validates the blob upload state from the
+// token, using the hmacKey secret.
+func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) {
+	var state blobUploadState
+
+	tokenBytes, err := base64.URLEncoding.DecodeString(token)
+	if err != nil {
+		return state, err
+	}
+	mac := hmac.New(sha256.New, []byte(secret))
+
+	if len(tokenBytes) < mac.Size() {
+		return state, fmt.Errorf("Invalid token")
+	}
+
+	macBytes := tokenBytes[:mac.Size()]
+	messageBytes := tokenBytes[mac.Size():]
+
+	mac.Write(messageBytes)
+	if !hmac.Equal(mac.Sum(nil), macBytes) {
+		return state, fmt.Errorf("Invalid token")
+	}
+
+	if err := json.Unmarshal(messageBytes, &state); err != nil {
+		return state, err
+	}
+
+	return state, nil
+}
+
+// packUploadState packs the upload state signed with an HMAC digest using
+// the hmacKey secret, encoding to url safe base64. The resulting token can be
+// used to share data with minimized risk of external tampering.
+func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) {
+	mac := hmac.New(sha256.New, []byte(secret))
+	p, err := json.Marshal(lus)
+	if err != nil {
+		return "", err
+	}
+
+	mac.Write(p)
+
+	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go
new file mode 100644
index 00000000..366c7279
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go
@@ -0,0 +1,117 @@
+package handlers
+
+import "testing"
+
+var blobUploadStates = []blobUploadState{
+	{
+		Name:   "hello",
+		UUID:   "abcd-1234-qwer-0987",
+		Offset: 0,
+	},
+	{
+		Name:   "hello-world",
+		UUID:   "abcd-1234-qwer-0987",
+		Offset: 0,
+	},
+	{
+		Name:   "h3ll0_w0rld",
+		UUID:   "abcd-1234-qwer-0987",
+		Offset: 1337,
+	},
+	{
+		Name:   "ABCDEFG",
+		UUID:   "ABCD-1234-QWER-0987",
+		Offset: 1234567890,
+	},
+	{
+		Name:   "this-is-A-sort-of-Long-name-for-Testing",
+		UUID:   "dead-1234-beef-0987",
+		Offset: 8675309,
+	},
+}
+
+var secrets = []string{
+	"supersecret",
+	"12345",
+	"a",
+	"SuperSecret",
+	"Sup3r... S3cr3t!",
+	"This is a reasonably long secret key that is used for the purpose of testing.",
+	"\u2603+\u2744", // snowman+snowflake
+}
+
+// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and
+// validates that the tokens can be used to reconstruct the proper upload state.
+func TestLayerUploadTokens(t *testing.T) {
+	secret := hmacKey("supersecret")
+
+	for _, testcase := range blobUploadStates {
+		token, err := secret.packUploadState(testcase)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		lus, err := secret.unpackUploadState(token)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		assertBlobUploadStateEquals(t, testcase, lus)
+	}
+}
+
+// TestHMACValidation ensures that any HMAC token providers are compatible if and
+// only if they share the same secret.
+func TestHMACValidation(t *testing.T) {
+	for _, secret := range secrets {
+		secret1 := hmacKey(secret)
+		secret2 := hmacKey(secret)
+		badSecret := hmacKey("DifferentSecret")
+
+		for _, testcase := range blobUploadStates {
+			token, err := secret1.packUploadState(testcase)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			lus, err := secret2.unpackUploadState(token)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			assertBlobUploadStateEquals(t, testcase, lus)
+
+			_, err = badSecret.unpackUploadState(token)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token)
+			}
+
+			badToken, err := badSecret.packUploadState(lus)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			_, err = secret1.unpackUploadState(badToken)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
+			}
+
+			_, err = secret2.unpackUploadState(badToken)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
+			}
+		}
+	}
+}
+
+func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) {
+	if expected.Name != received.Name {
+		t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name)
+	}
+	if expected.UUID != received.UUID {
+		t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID)
+	}
+	if expected.Offset != received.Offset {
+		t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go
new file mode 100644
index 00000000..7bbab4f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go
@@ -0,0 +1,53 @@
+package handlers
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"text/template"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// logHook is for hooking panics in the web application
+type logHook struct {
+	LevelsParam []string
+	Mail        *mailer
+}
+
+// Fire forwards an error to LogHook
+func (hook *logHook) Fire(entry *logrus.Entry) error {
+	addr := strings.Split(hook.Mail.Addr, ":")
+	if len(addr) != 2 {
+		return errors.New("Invalid Mail Address")
+	}
+	host := addr[0]
+	subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
+
+	html := `
+	{{.Message}}
+
+	{{range $key, $value := .Data}}
+	{{$key}}: {{$value}}
+	{{end}}
+	`
+	b := bytes.NewBuffer(make([]byte, 0))
+	t := template.Must(template.New("mail body").Parse(html))
+	if err := t.Execute(b, entry); err != nil {
+		return err
+	}
+	body := fmt.Sprintf("%s", b)
+
+	return hook.Mail.sendMail(subject, body)
+}
+
+// Levels contains hook levels to be caught
+func (hook *logHook) Levels() []logrus.Level {
+	levels := []logrus.Level{}
+	for _, v := range hook.LevelsParam {
+		lv, _ := logrus.ParseLevel(v)
+		levels = append(levels, lv)
+	}
+	return levels
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go
new file mode 100644
index 00000000..dbe7b706
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go
@@ -0,0 +1,251 @@
+package handlers
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/docker/distribution"
+	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/gorilla/handlers"
+	"golang.org/x/net/context"
+)
+
+// imageManifestDispatcher takes the request context and builds the
+// appropriate handler for handling image manifest requests.
+func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {
+	imageManifestHandler := &imageManifestHandler{
+		Context: ctx,
+	}
+	reference := getReference(ctx)
+	dgst, err := digest.ParseDigest(reference)
+	if err != nil {
+		// We just have a tag
+		imageManifestHandler.Tag = reference
+	} else {
+		imageManifestHandler.Digest = dgst
+	}
+
+	return handlers.MethodHandler{
+		"GET":    http.HandlerFunc(imageManifestHandler.GetImageManifest),
+		"PUT":    http.HandlerFunc(imageManifestHandler.PutImageManifest),
+		"DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest),
+	}
+}
+
+// imageManifestHandler handles http operations on image manifests.
+type imageManifestHandler struct {
+	*Context
+
+	// One of tag or digest gets set, depending on what is present in context.
+	Tag    string
+	Digest digest.Digest
+}
+
+// GetImageManifest fetches the image manifest from the storage backend, if it exists.
+func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(imh).Debug("GetImageManifest")
+	manifests, err := imh.Repository.Manifests(imh)
+	if err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	var sm *manifest.SignedManifest
+	if imh.Tag != "" {
+		sm, err = manifests.GetByTag(imh.Tag)
+	} else {
+		if etagMatch(r, imh.Digest.String()) {
+			w.WriteHeader(http.StatusNotModified)
+			return
+		}
+		sm, err = manifests.Get(imh.Digest)
+	}
+
+	if err != nil {
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))
+		return
+	}
+
+	// Get the digest, if we don't already have it.
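+	// For tag lookups the digest is computed from the signed manifest payload
+	// so that the Docker-Content-Digest and Etag headers below can always be
+	// populated.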
+	if imh.Digest == "" {
+		dgst, err := digestManifest(imh, sm)
+		if err != nil {
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+			return
+		}
+		if etagMatch(r, dgst.String()) {
+			w.WriteHeader(http.StatusNotModified)
+			return
+		}
+
+		imh.Digest = dgst
+	}
+
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw)))
+	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
+	w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
+	w.Write(sm.Raw)
+}
+
+func etagMatch(r *http.Request, etag string) bool {
+	for _, headerVal := range r.Header["If-None-Match"] {
+		if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted
+			return true
+		}
+	}
+	return false
+}
+
+// PutImageManifest validates and stores an image in the registry.
+func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(imh).Debug("PutImageManifest")
+	manifests, err := imh.Repository.Manifests(imh)
+	if err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	var jsonBuf bytes.Buffer
+	if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil {
+		// copyFullPayload reports the error if necessary
+		return
+	}
+
+	var manifest manifest.SignedManifest
+	if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil {
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
+		return
+	}
+
+	dgst, err := digestManifest(imh, &manifest)
+	if err != nil {
+		imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+		return
+	}
+
+	// Validate manifest tag or digest matches payload
+	if imh.Tag != "" {
+		if manifest.Tag != imh.Tag {
+			ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag)
+			imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid)
+			return
+		}
+
+		imh.Digest = dgst
+	} else if imh.Digest != "" {
+		if dgst != imh.Digest {
+			ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", dgst, imh.Digest)
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
+			return
+		}
+	} else {
+		imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
+		return
+	}
+
+	if err := manifests.Put(&manifest); err != nil {
+		// TODO(stevvooe): These error handling switches really need to be
+		// handled by an app global mapper.
+		switch err := err.(type) {
+		case distribution.ErrManifestVerification:
+			for _, verificationError := range err {
+				switch verificationError := verificationError.(type) {
+				case distribution.ErrManifestBlobUnknown:
+					imh.Errors = append(imh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(verificationError.Digest))
+				case distribution.ErrManifestUnverified:
+					imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)
+				default:
+					if verificationError == digest.ErrDigestInvalidFormat {
+						imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
+					} else {
+						imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)
+					}
+				}
+			}
+		default:
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		}
+
+		return
+	}
+
+	// Construct a canonical url for the uploaded manifest.
+	location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String())
+	if err != nil {
+		// NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to
+		// happen. We'll log the error here but proceed as if it worked. Worst
+		// case, we set an empty location header.
+		ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err)
+	}
+
+	w.Header().Set("Location", location)
+	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
+	w.WriteHeader(http.StatusCreated)
+}
+
+// DeleteImageManifest removes the manifest with the given digest from the registry.
+func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(imh).Debug("DeleteImageManifest")
+
+	manifests, err := imh.Repository.Manifests(imh)
+	if err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	err = manifests.Delete(imh.Digest)
+	if err != nil {
+		switch err {
+		case digest.ErrDigestUnsupported, digest.ErrDigestInvalidFormat:
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
+			return
+		case distribution.ErrBlobUnknown:
+			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)
+			w.WriteHeader(http.StatusNotFound)
+			return
+		case distribution.ErrUnsupported:
+			imh.Errors = append(imh.Errors, v2.ErrorCodeUnsupported)
+			w.WriteHeader(http.StatusMethodNotAllowed)
+		default:
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+
+	w.WriteHeader(http.StatusAccepted)
+}
+
+// digestManifest takes a digest of the given manifest. This belongs somewhere
+// better but we'll wait for a refactoring cycle to find that real somewhere.
+func digestManifest(ctx context.Context, sm *manifest.SignedManifest) (digest.Digest, error) {
+	p, err := sm.Payload()
+	if err != nil {
+		if !strings.Contains(err.Error(), "missing signature key") {
+			ctxu.GetLogger(ctx).Errorf("error getting manifest payload: %v", err)
+			return "", err
+		}
+
+		// NOTE(stevvooe): There are no signatures but we still have a
+		// payload. The request will fail later but this is not the
+		// responsibility of this part of the code.
+		p = sm.Raw
+	}
+
+	dgst, err := digest.FromBytes(p)
+	if err != nil {
+		ctxu.GetLogger(ctx).Errorf("error digesting manifest: %v", err)
+		return "", err
+	}
+
+	return dgst, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go
new file mode 100644
index 00000000..39244909
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go
@@ -0,0 +1,45 @@
+package handlers
+
+import (
+	"errors"
+	"net/smtp"
+	"strings"
+)
+
+// mailer provides fields of email configuration for sending.
+type mailer struct {
+	Addr, Username, Password, From string
+	Insecure                       bool
+	To                             []string
+}
+
+// sendMail allows users to send email, but only if the mail parameters are
+// configured correctly.
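+// Addr is expected in "host:port" form; the host half is reused for
+// smtp.PlainAuth while the full Addr is passed to smtp.SendMail.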
+func (mail *mailer) sendMail(subject, message string) error { + addr := strings.Split(mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + msg := []byte("To:" + strings.Join(mail.To, ";") + + "\r\nFrom: " + mail.From + + "\r\nSubject: " + subject + + "\r\nContent-Type: text/plain\r\n\r\n" + + message) + auth := smtp.PlainAuth( + "", + mail.Username, + mail.Password, + host, + ) + err := smtp.SendMail( + mail.Addr, + auth, + mail.From, + mail.To, + []byte(msg), + ) + if err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go new file mode 100644 index 00000000..54725585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go @@ -0,0 +1,64 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// tagsDispatcher constructs the tags handler api endpoint. +func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { + tagsHandler := &tagsHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(tagsHandler.GetTags), + } +} + +// tagsHandler handles requests for lists of tags under a repository name. +type tagsHandler struct { + *Context +} + +type tagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// GetTags returns a json list of tags for a specific image name. +func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + manifests, err := th.Repository.Manifests(th) + if err != nil { + th.Errors = append(th.Errors, err) + return + } + + tags, err := manifests.Tags() + if err != nil { + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()})) + default: + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + enc := json.NewEncoder(w) + if err := enc.Encode(tagsAPIResponse{ + Name: th.Repository.Name(), + Tags: tags, + }); err != nil { + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go new file mode 100644 index 00000000..b93a7a63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go @@ -0,0 +1,74 @@ +package listener + +import ( + "fmt" + "net" + "os" + "time" +) + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// it is a plain copy-paste from net/http/server.go +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// NewListener announces on laddr and net. 
Accepted values of the net are +// 'unix' and 'tcp' +func NewListener(net, laddr string) (net.Listener, error) { + switch net { + case "unix": + return newUnixListener(laddr) + case "tcp", "": // an empty net means tcp + return newTCPListener(laddr) + default: + return nil, fmt.Errorf("unknown address type %s", net) + } +} + +func newUnixListener(laddr string) (net.Listener, error) { + fi, err := os.Stat(laddr) + if err == nil { + // the file exists. + // try to remove it if it's a socket + if !isSocket(fi.Mode()) { + return nil, fmt.Errorf("file %s exists and is not a socket", laddr) + } + + if err := os.Remove(laddr); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + // we can't do stat on the file. + // it means we can not remove it + return nil, err + } + + return net.Listen("unix", laddr) +} + +func isSocket(m os.FileMode) bool { + return m&os.ModeSocket != 0 +} + +func newTCPListener(laddr string) (net.Listener, error) { + ln, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go new file mode 100644 index 00000000..7535c6db --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// InitFunc is the type of a RegistryMiddleware factory function and is +// used to register the constructor for different RegistryMiddleware backends. +type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RegistryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RegistryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, registry, options) + } + } + + return nil, fmt.Errorf("no registry middleware registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go new file mode 100644 index 00000000..27b42aec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// InitFunc is the type of a RepositoryMiddleware factory function and is +// used to register the constructor for different RepositoryMiddleware backends. 
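+// A backend typically calls Register from an init function and the handlers
+// package resolves it via Get while building the repository. For example
+// (illustrative only; initExample stands in for a hypothetical InitFunc):
+//
+//	func init() { Register("example", initExample) }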
+type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RepositoryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RepositoryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, repository, options) + } + } + + return nil, fmt.Errorf("no repository middleware registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go new file mode 100644 index 00000000..e4bec75a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go @@ -0,0 +1,54 @@ +package proxy + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution/registry/client/auth" +) + +const tokenURL = "https://auth.docker.io/token" + +type userpass struct { + username string + password string +} + +type credentials struct { + creds map[string]userpass +} + +func (c credentials) Basic(u *url.URL) (string, string) { + up := c.creds[u.String()] + + return up.username, up.password +} + +// ConfigureAuth authorizes with the upstream registry +func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { + if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { + return nil, err + } + + creds := map[string]userpass{ + tokenURL: { + username: username, + password: password, + }, + } + return credentials{creds: creds}, nil +} + +func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { + resp, err := http.Get(endpoint) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go new file mode 100644 index 00000000..b480a111 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go @@ -0,0 +1,214 @@ +package proxy + +import ( + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config file +const blobTTL = time.Duration(24 * 7 * time.Hour) + +type proxyBlobStore struct { + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler +} + +var _ distribution.BlobStore = proxyBlobStore{} + +type inflightBlob struct { + refCount int + bw distribution.BlobWriter +} + +// inflight tracks currently downloading 
blobs
+var inflight = make(map[digest.Digest]*inflightBlob)
+
+// mu protects inflight
+var mu sync.Mutex
+
+func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
+	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
+	w.Header().Set("Content-Type", mediaType)
+	w.Header().Set("Docker-Content-Digest", digest.String())
+	w.Header().Set("Etag", digest.String())
+}
+
+func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	desc, err := pbs.localStore.Stat(ctx, dgst)
+	if err != nil && err != distribution.ErrBlobUnknown {
+		return err
+	}
+
+	if err == nil {
+		proxyMetrics.BlobPush(uint64(desc.Size))
+		return pbs.localStore.ServeBlob(ctx, w, r, dgst)
+	}
+
+	desc, err = pbs.remoteStore.Stat(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc)
+	if err != nil {
+		return err
+	}
+	defer cleanup()
+
+	if isNew {
+		go func() {
+			err := streamToStorage(ctx, remoteReader, desc, bw)
+			if err != nil {
+				context.GetLogger(ctx).Error(err)
+			}
+
+			proxyMetrics.BlobPull(uint64(desc.Size))
+		}()
+		err := streamToClient(ctx, w, desc, bw)
+		if err != nil {
+			return err
+		}
+
+		proxyMetrics.BlobPush(uint64(desc.Size))
+		pbs.scheduler.AddBlob(dgst.String(), blobTTL)
+		return nil
+	}
+
+	err = streamToClient(ctx, w, desc, bw)
+	if err != nil {
+		return err
+	}
+	proxyMetrics.BlobPush(uint64(desc.Size))
+	return nil
+}
+
+type cleanupFunc func()
+
+// getOrCreateBlobWriter tracks which blobs are currently being downloaded and enables clients
+// requesting the same blob concurrently to read from the existing stream.
+func getOrCreateBlobWriter(ctx context.Context, blobs distribution.BlobService, desc distribution.Descriptor) (distribution.BlobWriter, bool, cleanupFunc, error) {
+	mu.Lock()
+	defer mu.Unlock()
+	dgst := desc.Digest
+
+	cleanup := func() {
+		mu.Lock()
+		defer mu.Unlock()
+		inflight[dgst].refCount--
+
+		if inflight[dgst].refCount == 0 {
+			defer delete(inflight, dgst)
+			_, err := inflight[dgst].bw.Commit(ctx, desc)
+			if err != nil {
+				// There is a narrow race here where Commit can be called while this blob's TTL is expiring
+				// and it's being removed from storage. In that case, the client stream will continue
+				// uninterrupted and the blob will be pulled through on the next request, so just log it
+				context.GetLogger(ctx).Errorf("Error committing blob: %q", err)
+			}
+
+		}
+	}
+
+	var bw distribution.BlobWriter
+	_, ok := inflight[dgst]
+	if ok {
+		bw = inflight[dgst].bw
+		inflight[dgst].refCount++
+		return bw, false, cleanup, nil
+	}
+
+	var err error
+	bw, err = blobs.Create(ctx)
+	if err != nil {
+		return nil, false, nil, err
+	}
+
+	inflight[dgst] = &inflightBlob{refCount: 1, bw: bw}
+	return bw, true, cleanup, nil
+}
+
+func streamToStorage(ctx context.Context, remoteReader distribution.ReadSeekCloser, desc distribution.Descriptor, bw distribution.BlobWriter) error {
+	_, err := io.CopyN(bw, remoteReader, desc.Size)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func streamToClient(ctx context.Context, w http.ResponseWriter, desc distribution.Descriptor, bw distribution.BlobWriter) error {
+	setResponseHeaders(w, desc.Size, desc.MediaType, desc.Digest)
+
+	reader, err := bw.Reader()
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+	teeReader := io.TeeReader(reader, w)
+	buf := make([]byte, 32768)
+	var soFar int64
+	for {
+		rd, err := teeReader.Read(buf)
+		if err == nil || err == io.EOF {
+			soFar += int64(rd)
+			if soFar < desc.Size {
+				// buffer underflow, keep trying
+				continue
+			}
+			return nil
+		}
+		return err
+	}
+}
+
+func (pbs proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	desc, err := pbs.localStore.Stat(ctx, dgst)
+	if err == nil {
+		return desc, err
+	}
+
+	if err != distribution.ErrBlobUnknown {
+		return distribution.Descriptor{}, err
+	}
+
+	return pbs.remoteStore.Stat(ctx, dgst)
+}
+
+// Unsupported functions
+func (pbs proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	return distribution.Descriptor{}, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	return distribution.ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go
new file mode 100644
index 00000000..65d5f922
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go
@@ -0,0 +1,231 @@
+package proxy
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/proxy/scheduler"
+	"github.com/docker/distribution/registry/storage"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+type statsBlobStore struct {
+	stats map[string]int
+	blobs
distribution.BlobStore +} + +func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbs.stats["put"]++ + return sbs.blobs.Put(ctx, mediaType, p) +} + +func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbs.stats["get"]++ + return sbs.blobs.Get(ctx, dgst) +} + +func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + sbs.stats["create"]++ + return sbs.blobs.Create(ctx) +} + +func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbs.stats["resume"]++ + return sbs.blobs.Resume(ctx, id) +} + +func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbs.stats["open"]++ + return sbs.blobs.Open(ctx, dgst) +} + +func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbs.stats["serveblob"]++ + return sbs.blobs.ServeBlob(ctx, w, r, dgst) +} + +func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + sbs.stats["stat"]++ + return sbs.blobs.Stat(ctx, dgst) +} + +func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbs.stats["delete"]++ + return sbs.blobs.Delete(ctx, dgst) +} + +type testEnv struct { + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context +} + +func (te testEnv) LocalStats() *map[string]int { + ls := te.store.localStore.(statsBlobStore).stats + return &ls +} + +func (te testEnv) RemoteStats() *map[string]int { + rs := te.store.remoteStore.(statsBlobStore).stats + return &rs +} + +// Populate remote store and record the digests +func makeTestEnv(t *testing.T, name string) testEnv { + ctx := context.Background() + + localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true) + localRepo, err := localRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false) + truthRepo, err := truthRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: truthRepo.Blobs(ctx), + } + + localBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: localRepo.Blobs(ctx), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + + proxyBlobStore := proxyBlobStore{ + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + } + + te := testEnv{ + store: proxyBlobStore, + ctx: ctx, + } + return te +} + +func populate(t *testing.T, te *testEnv, blobCount int) { + var inRemote []distribution.Descriptor + for i := 0; i < blobCount; i++ { + bytes := []byte(fmt.Sprintf("blob%d", i)) + + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Errorf("Put in store") + } + inRemote = append(inRemote, desc) + } + + te.inRemote = inRemote + +} + +func TestProxyStoreStat(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 + populate(t, &te, remoteBlobCount) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Stat - touches both stores + for _, d := range te.inRemote { + _, err := te.store.Stat(te.ctx, d.Digest) + if err != nil { + 
t.Fatalf("Error stating proxy store") + } + } + + if (*localStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected local stat count") + } + + if (*remoteStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected remote stat count") + } +} + +func TestProxyStoreServe(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 + populate(t, &te, remoteBlobCount) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Serveblob - pulls through blobs + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl, err := digest.FromBytes(w.Body.Bytes()) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + if (*localStats)["stat"] != remoteBlobCount && (*localStats)["create"] != remoteBlobCount { + t.Fatalf("unexpected local stats") + } + if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { + t.Fatalf("unexpected local stats") + } + + // Serveblob - blobs come from local + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl, err := digest.FromBytes(w.Body.Bytes()) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + // Stat to find local, but no new blobs were created + if (*localStats)["stat"] != remoteBlobCount*2 && (*localStats)["create"] != remoteBlobCount*2 { + t.Fatalf("unexpected local stats") + } + + // Remote unchanged + if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { + fmt.Printf("\tlocal=%#v, \n\tremote=%#v\n", localStats, remoteStats) + t.Fatalf("unexpected local stats") + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go new file mode 100644 index 00000000..5b79c8ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -0,0 +1,155 @@ +package proxy + +import ( + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName string + scheduler *scheduler.TTLExpirationScheduler +} + +var _ distribution.ManifestService = &proxyManifestStore{} + +func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(dgst) + if err != nil { + return false, err + } + if exists { + return true, nil + } + + return pms.remoteManifests.Exists(dgst) +} + +func (pms proxyManifestStore) Get(dgst 
digest.Digest) (*manifest.SignedManifest, error) { + sm, err := pms.localManifests.Get(dgst) + if err == nil { + proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + return sm, err + } + + sm, err = pms.remoteManifests.Get(dgst) + if err != nil { + return nil, err + } + + proxyMetrics.ManifestPull(uint64(len(sm.Raw))) + err = pms.localManifests.Put(sm) + if err != nil { + return nil, err + } + + // Schedule the repo for removal + pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + + // Ensure the manifest blob is cleaned up + pms.scheduler.AddBlob(dgst.String(), repositoryTTL) + + proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + + return sm, err +} + +func (pms proxyManifestStore) Tags() ([]string, error) { + return pms.localManifests.Tags() +} + +func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) { + exists, err := pms.localManifests.ExistsByTag(tag) + if err != nil { + return false, err + } + if exists { + return true, nil + } + + return pms.remoteManifests.ExistsByTag(tag) +} + +func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + var localDigest digest.Digest + + localManifest, err := pms.localManifests.GetByTag(tag, options...) + switch err.(type) { + case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision: + goto fromremote + case nil: + break + default: + return nil, err + } + + localDigest, err = manifestDigest(localManifest) + if err != nil { + return nil, err + } + +fromremote: + var sm *manifest.SignedManifest + sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) + if err != nil { + return nil, err + } + + if sm == nil { + context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String()) + return localManifest, nil + } + context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String()) + + err = pms.localManifests.Put(sm) + if err != nil { + return nil, err + } + + dgst, err := manifestDigest(sm) + if err != nil { + return nil, err + } + pms.scheduler.AddBlob(dgst.String(), repositoryTTL) + pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + + proxyMetrics.ManifestPull(uint64(len(sm.Raw))) + proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + + return sm, err +} + +func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { + payload, err := sm.Payload() + if err != nil { + return "", err + + } + + dgst, err := digest.FromBytes(payload) + if err != nil { + return "", err + } + + return dgst, nil +} + +func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error { + return v2.ErrorCodeUnsupported +} + +func (pms proxyManifestStore) Delete(dgst digest.Digest) error { + return v2.ErrorCodeUnsupported +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go new file mode 100644 index 00000000..7b9b8091 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go @@ -0,0 +1,235 @@ +package proxy + +import ( + "io" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + 
"github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type statsManifest struct { + manifests distribution.ManifestService + stats map[string]int +} + +type manifestStoreTestEnv struct { + manifestDigest digest.Digest // digest of the signed manifest in the local storage + manifests proxyManifestStore +} + +func (te manifestStoreTestEnv) LocalStats() *map[string]int { + ls := te.manifests.localManifests.(statsManifest).stats + return &ls +} + +func (te manifestStoreTestEnv) RemoteStats() *map[string]int { + rs := te.manifests.remoteManifests.(statsManifest).stats + return &rs +} + +func (sm statsManifest) Delete(dgst digest.Digest) error { + sm.stats["delete"]++ + return sm.manifests.Delete(dgst) +} + +func (sm statsManifest) Exists(dgst digest.Digest) (bool, error) { + sm.stats["exists"]++ + return sm.manifests.Exists(dgst) +} + +func (sm statsManifest) ExistsByTag(tag string) (bool, error) { + sm.stats["existbytag"]++ + return sm.manifests.ExistsByTag(tag) +} + +func (sm statsManifest) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + sm.stats["get"]++ + return sm.manifests.Get(dgst) +} + +func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + sm.stats["getbytag"]++ + return sm.manifests.GetByTag(tag, options...) +} + +func (sm statsManifest) Put(manifest *manifest.SignedManifest) error { + sm.stats["put"]++ + return sm.manifests.Put(manifest) +} + +func (sm statsManifest) Tags() ([]string, error) { + sm.stats["tags"]++ + return sm.manifests.Tags() +} + +func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + ctx := context.Background() + truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false) + truthRepo, err := truthRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + tr, err := truthRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + truthManifests := statsManifest{ + manifests: tr, + stats: make(map[string]int), + } + + manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag) + if err != nil { + t.Fatalf(err.Error()) + } + + localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true) + localRepo, err := localRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + lr, err := localRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + localManifests := statsManifest{ + manifests: lr, + stats: make(map[string]int), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + return &manifestStoreTestEnv{ + manifestDigest: manifestDigest, + manifests: proxyManifestStore{ + ctx: ctx, + localManifests: localManifests, + remoteManifests: truthManifests, + scheduler: s, + }, + } +} + +func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) { + m := manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + } + + for i := 0; i < 2; i++ { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + rs, ts, err := 
testutil.CreateRandomTarFile()
+		if err != nil {
+			t.Fatalf("unexpected error generating test layer file")
+		}
+		dgst := digest.Digest(ts)
+		if _, err := io.Copy(wr, rs); err != nil {
+			t.Fatalf("unexpected error copying to upload: %v", err)
+		}
+
+		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
+			t.Fatalf("unexpected error finishing upload: %v", err)
+		}
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	sm, err := manifest.Sign(&m, pk)
+	if err != nil {
+		t.Fatalf("error signing manifest: %v", err)
+	}
+
+	ms, err := repository.Manifests(ctx)
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+	err = ms.Put(sm)
+	if err != nil {
+		t.Fatalf("unexpected error putting manifest: %v", err)
+	}
+	pl, err := sm.Payload()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return digest.FromBytes(pl)
+}
+
+// TestProxyManifests contains basic acceptance tests
+// for the pull-through behavior
+func TestProxyManifests(t *testing.T) {
+	name := "foo/bar"
+	env := newManifestStoreTestEnv(t, name, "latest")
+
+	localStats := env.LocalStats()
+	remoteStats := env.RemoteStats()
+
+	// Stat - must check local and remote
+	exists, err := env.manifests.ExistsByTag("latest")
+	if err != nil {
+		t.Fatalf("Error checking existence")
+	}
+	if !exists {
+		t.Errorf("Unexpected non-existent manifest")
+	}
+
+	if (*localStats)["existbytag"] != 1 && (*remoteStats)["existbytag"] != 1 {
+		t.Errorf("Unexpected exists count")
+	}
+
+	// Get - should succeed and pull manifest into local
+	_, err = env.manifests.Get(env.manifestDigest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 {
+		t.Errorf("Unexpected get count")
+	}
+
+	if (*localStats)["put"] != 1 {
+		t.Errorf("Expected local put")
+	}
+
+	// Stat - should only go to local
+	exists, err = env.manifests.ExistsByTag("latest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("Unexpected non-existent manifest")
+	}
+
+	if (*localStats)["existbytag"] != 2 && (*remoteStats)["existbytag"] != 1 {
+		t.Errorf("Unexpected exists count")
+
+	}
+
+	// Get - should get from remote, to test freshness
+	_, err = env.manifests.Get(env.manifestDigest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if (*remoteStats)["get"] != 2 && (*remoteStats)["existbytag"] != 1 && (*localStats)["put"] != 1 {
+		t.Errorf("Unexpected get count")
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go
new file mode 100644
index 00000000..d3d84d78
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go
@@ -0,0 +1,74 @@
+package proxy
+
+import (
+	"expvar"
+	"sync/atomic"
+)
+
+// Metrics is used to hold metric counters
+// related to the proxy
+type Metrics struct {
+	Requests    uint64
+	Hits        uint64
+	Misses      uint64
+	BytesPulled uint64
+	BytesPushed uint64
+}
+
+type proxyMetricsCollector struct {
+	blobMetrics     Metrics
+	manifestMetrics Metrics
+}
+
+// BlobPull tracks metrics about blobs pulled into the cache
+func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) {
+	atomic.AddUint64(&pmc.blobMetrics.Misses, 1)
+	atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled)
+}
+
+// BlobPush tracks metrics about blobs pushed to clients
+func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) {
+	atomic.AddUint64(&pmc.blobMetrics.Requests, 1)
+
atomic.AddUint64(&pmc.blobMetrics.Hits, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) +} + +// ManifestPull tracks metrics related to Manifests pulled into the cache +func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) +} + +// ManifestPush tracks metrics about manifests pushed to clients +func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) + atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) +} + +// proxyMetrics tracks metrics about the proxy cache. This is +// kept globally and made available via expvar. +var proxyMetrics = &proxyMetricsCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + pm := registry.(*expvar.Map).Get("proxy") + if pm == nil { + pm = &expvar.Map{} + pm.(*expvar.Map).Init() + registry.(*expvar.Map).Set("proxy", pm) + } + + pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + return proxyMetrics.blobMetrics + })) + + pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { + return proxyMetrics.manifestMetrics + })) + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go new file mode 100644 index 00000000..e9dec2f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go @@ -0,0 +1,139 @@ +package proxy + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" +) + +// proxyingRegistry fetches content from a remote registry and caches it locally +type proxyingRegistry struct { + embedded distribution.Namespace // provides local registry functionality + + scheduler *scheduler.TTLExpirationScheduler + + remoteURL string + credentialStore auth.CredentialStore + challengeManager auth.ChallengeManager +} + +// NewRegistryPullThroughCache creates a registry acting as a pull through cache +func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { + _, err := url.Parse(config.RemoteURL) + if err != nil { + return nil, err + } + + v := storage.NewVacuum(ctx, driver) + + s := scheduler.New(ctx, driver, "/scheduler-state.json") + s.OnBlobExpire(func(digest string) error { + return v.RemoveBlob(digest) + }) + s.OnManifestExpire(func(repoName string) error { + return v.RemoveRepository(repoName) + }) + err = s.Start() + if err != nil { + return nil, err + } + + challengeManager := auth.NewSimpleChallengeManager() + cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) + if err != nil { + return nil, err + } + + return &proxyingRegistry{ + embedded: registry, + scheduler: s, + challengeManager: challengeManager, + 
credentialStore:  cs,
+		remoteURL:        config.RemoteURL,
+	}, nil
+}
+
+func (pr *proxyingRegistry) Scope() distribution.Scope {
+	return distribution.GlobalScope
+}
+
+func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
+	return pr.embedded.Repositories(ctx, repos, last)
+}
+
+func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) {
+	tr := transport.NewTransport(http.DefaultTransport,
+		auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull")))
+
+	localRepo, err := pr.embedded.Repository(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+	localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification)
+	if err != nil {
+		return nil, err
+	}
+
+	remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr)
+	if err != nil {
+		return nil, err
+	}
+
+	remoteManifests, err := remoteRepo.Manifests(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &proxiedRepository{
+		blobStore: proxyBlobStore{
+			localStore:  localRepo.Blobs(ctx),
+			remoteStore: remoteRepo.Blobs(ctx),
+			scheduler:   pr.scheduler,
+		},
+		manifests: proxyManifestStore{
+			repositoryName:  name,
+			localManifests:  localManifests, // Options?
+			remoteManifests: remoteManifests,
+			ctx:             ctx,
+			scheduler:       pr.scheduler,
+		},
+		name:       name,
+		signatures: localRepo.Signatures(),
+	}, nil
+}
+
+// proxiedRepository uses proxying blob and manifest services to serve content
+// locally, pulling it through from a remote and caching it locally if it
+// doesn't already exist
+type proxiedRepository struct {
+	blobStore  distribution.BlobStore
+	manifests  distribution.ManifestService
+	name       string
+	signatures distribution.SignatureService
+}
+
+func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	// options
+	return pr.manifests, nil
+}
+
+func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore {
+	return pr.blobStore
+}
+
+func (pr *proxiedRepository) Name() string {
+	return pr.name
+}
+
+func (pr *proxiedRepository) Signatures() distribution.SignatureService {
+	return pr.signatures
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go
new file mode 100644
index 00000000..056b148a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go
@@ -0,0 +1,250 @@
+package scheduler
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// expiryFunc is called when a repository's TTL expires
+type expiryFunc func(string) error
+
+const (
+	entryTypeBlob = iota
+	entryTypeManifest
+)
+
+// schedulerEntry represents an entry in the scheduler
+// fields are exported for serialization
+type schedulerEntry struct {
+	Key       string    `json:"Key"`
+	Expiry    time.Time `json:"ExpiryData"`
+	EntryType int       `json:"EntryType"`
+}
+
+// New returns a new instance of the scheduler
+func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler {
+	return &TTLExpirationScheduler{
+		entries:  make(map[string]schedulerEntry),
+		addChan:  make(chan schedulerEntry),
+		stopChan: make(chan bool),
+		driver:   driver,
+		pathToStateFile:
path, + ctx: ctx, + stopped: true, + } +} + +// TTLExpirationScheduler is a scheduler used to perform actions +// when TTLs expire +type TTLExpirationScheduler struct { + entries map[string]schedulerEntry + addChan chan schedulerEntry + stopChan chan bool + + driver driver.StorageDriver + ctx context.Context + pathToStateFile string + + stopped bool + + onBlobExpire expiryFunc + onManifestExpire expiryFunc +} + +// addChan allows more TTLs to be pushed to the scheduler +type addChan chan schedulerEntry + +// stopChan allows the scheduler to be stopped - used for testing. +type stopChan chan bool + +// OnBlobExpire is called when a scheduled blob's TTL expires +func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.onBlobExpire = f +} + +// OnManifestExpire is called when a scheduled manifest's TTL expires +func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.onManifestExpire = f +} + +// AddBlob schedules a blob cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + ttles.add(dgst, ttl, entryTypeBlob) + return nil +} + +// AddManifest schedules a manifest cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(repoName, ttl, entryTypeManifest) + return nil +} + +// Start starts the scheduler +func (ttles *TTLExpirationScheduler) Start() error { + return ttles.start() +} + +func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { + entry := schedulerEntry{ + Key: key, + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + ttles.addChan <- entry +} + +func (ttles *TTLExpirationScheduler) stop() { + ttles.stopChan <- true +} + +func (ttles *TTLExpirationScheduler) start() error { + err := ttles.readState() + if err != nil { + return err + } + + if !ttles.stopped { + return fmt.Errorf("Scheduler already started") + } + + context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + ttles.stopped = false + go ttles.mainloop() + + return nil +} + +// mainloop uses a select statement to listen for events. Most of its time +// is spent in waiting on a TTL to expire but can be interrupted when TTLs +// are added. 
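Callers never touch the channels or the loop below directly; the scheduler is driven entirely through New, the On*Expire callbacks, Start, and the Add* methods. A minimal sketch of that wiring, assuming the in-memory driver used by the tests later in this patch (the digest string is a hypothetical placeholder, and the expiry callback is a stub):

package main

import (
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/proxy/scheduler"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
	// OnBlobExpire registers the callback the mainloop runs when a
	// blob entry's TTL fires.
	s.OnBlobExpire(func(dgst string) error {
		// e.g. vacuum the expired blob from backing storage
		return nil
	})
	if err := s.Start(); err != nil {
		panic(err)
	}
	// AddBlob wakes the loop via addChan; the entry is persisted to
	// the state file so it survives a restart.
	s.AddBlob("sha256:0123456789abcdef", 24*7*time.Hour)
	time.Sleep(time.Second) // keep this toy example alive briefly
}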
+func (ttles *TTLExpirationScheduler) mainloop() { + for { + if ttles.stopped { + return + } + + nextEntry, ttl := nextExpiringEntry(ttles.entries) + if len(ttles.entries) == 0 { + context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Nothing to do, sleeping...") + } else { + context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Sleeping for %s until cleanup of %s", ttl, nextEntry.Key) + } + + select { + case <-time.After(ttl): + var f expiryFunc + + switch nextEntry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = func(repoName string) error { + return fmt.Errorf("Unexpected scheduler entry type") + } + } + + if err := f(nextEntry.Key); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", nextEntry.Key, err) + } + + delete(ttles.entries, nextEntry.Key) + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + case entry := <-ttles.addChan: + context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + ttles.entries[entry.Key] = entry + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + break + + case <-ttles.stopChan: + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + ttles.stopped = true + } + } +} + +func nextExpiringEntry(entries map[string]schedulerEntry) (*schedulerEntry, time.Duration) { + if len(entries) == 0 { + return nil, 24 * time.Hour + } + + // todo:(richardscothern) this is a primitive o(n) algorithm + // but n will never be *that* big and it's all in memory. 
Investigate
+	// time.AfterFunc for heap based expiries
+
+	first := true
+	var nextEntry schedulerEntry
+	for _, entry := range entries {
+		if first {
+			nextEntry = entry
+			first = false
+			continue
+		}
+		if entry.Expiry.Before(nextEntry.Expiry) {
+			nextEntry = entry
+		}
+	}
+
+	// Dates may be from the past if the scheduler has
+	// been restarted; set their TTL to 0
+	if nextEntry.Expiry.Before(time.Now()) {
+		nextEntry.Expiry = time.Now()
+		return &nextEntry, 0
+	}
+
+	return &nextEntry, nextEntry.Expiry.Sub(time.Now())
+}
+
+func (ttles *TTLExpirationScheduler) writeState() error {
+	jsonBytes, err := json.Marshal(ttles.entries)
+	if err != nil {
+		return err
+	}
+
+	err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ttles *TTLExpirationScheduler) readState() error {
+	if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil {
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			return nil
+		default:
+			return err
+		}
+	}
+
+	bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(bytes, &ttles.entries)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go
new file mode 100644
index 00000000..fb5479f0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go
@@ -0,0 +1,165 @@
+package scheduler
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+func TestSchedule(t *testing.T) {
+	timeUnit := time.Millisecond
+	remainingRepos := map[string]bool{
+		"testBlob1": true,
+		"testBlob2": true,
+		"ch00":      true,
+	}
+
+	s := New(context.Background(), inmemory.New(), "/ttl")
+	deleteFunc := func(repoName string) error {
+		if len(remainingRepos) == 0 {
+			t.Fatalf("Incorrect expiry count")
+		}
+		_, ok := remainingRepos[repoName]
+		if !ok {
+			t.Fatalf("Trying to remove nonexistent repo: %s", repoName)
+		}
+		fmt.Println("removing", repoName)
+		delete(remainingRepos, repoName)
+
+		return nil
+	}
+	s.onBlobExpire = deleteFunc
+	err := s.start()
+	if err != nil {
+		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
+	}
+
+	s.add("testBlob1", 3*timeUnit, entryTypeBlob)
+	s.add("testBlob2", 1*timeUnit, entryTypeBlob)
+
+	func() {
+		s.add("ch00", 1*timeUnit, entryTypeBlob)
+
+	}()
+
+	// Ensure all repos are deleted
+	<-time.After(50 * timeUnit)
+	if len(remainingRepos) != 0 {
+		t.Fatalf("Repositories remaining: %#v", remainingRepos)
+	}
+}
+
+func TestRestoreOld(t *testing.T) {
+	remainingRepos := map[string]bool{
+		"testBlob1": true,
+		"oldRepo":   true,
+	}
+
+	deleteFunc := func(repoName string) error {
+		if repoName == "oldRepo" && len(remainingRepos) == 3 {
+			t.Errorf("oldRepo should be removed first")
+		}
+		_, ok := remainingRepos[repoName]
+		if !ok {
+			t.Fatalf("Trying to remove nonexistent repo: %s", repoName)
+		}
+		delete(remainingRepos, repoName)
+		return nil
+	}
+
+	timeUnit := time.Millisecond
+	serialized, err := json.Marshal(&map[string]schedulerEntry{
+		"testBlob1": {
+			Expiry:    time.Now().Add(1 * timeUnit),
+			Key:       "testBlob1",
+			EntryType: 0,
+		},
+		"oldRepo": {
+			Expiry:    time.Now().Add(-3 * timeUnit), // TTL
passed, should be removed first + Key: "oldRepo", + EntryType: 0, + }, + }) + if err != nil { + t.Fatalf("Error serializing test data: %s", err.Error()) + } + + ctx := context.Background() + pathToStatFile := "/ttl" + fs := inmemory.New() + err = fs.PutContent(ctx, pathToStatFile, serialized) + if err != nil { + t.Fatal("Unable to write serialized data to fs") + } + s := New(context.Background(), fs, "/ttl") + s.onBlobExpire = deleteFunc + err = s.start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + + <-time.After(50 * timeUnit) + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestStopRestore(t *testing.T) { + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + "testBlob1": true, + "testBlob2": true, + } + deleteFunc := func(repoName string) error { + delete(remainingRepos, repoName) + return nil + } + + fs := inmemory.New() + pathToStateFile := "/ttl" + s := New(context.Background(), fs, pathToStateFile) + s.onBlobExpire = deleteFunc + + err := s.start() + if err != nil { + t.Fatalf(err.Error()) + } + s.add("testBlob1", 300*timeUnit, entryTypeBlob) + s.add("testBlob2", 100*timeUnit, entryTypeBlob) + + // Start and stop before all operations complete + // state will be written to fs + s.stop() + time.Sleep(10 * time.Millisecond) + + // v2 will restore state from fs + s2 := New(context.Background(), fs, pathToStateFile) + s2.onBlobExpire = deleteFunc + err = s2.start() + if err != nil { + t.Fatalf("Error starting v2: %s", err.Error()) + } + + <-time.After(500 * timeUnit) + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } + +} + +func TestDoubleStart(t *testing.T) { + s := New(context.Background(), inmemory.New(), "/ttl") + err := s.start() + if err != nil { + t.Fatalf("Unable to start scheduler") + } + fmt.Printf("%#v", s) + err = s.start() + if err == nil { + t.Fatalf("Scheduler started twice without error") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go new file mode 100644 index 00000000..e5cfa83e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go @@ -0,0 +1,407 @@ +package storage + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" +) + +// TestSimpleBlobUpload covers the blob upload process, exercising common +// error paths that might be seen during an upload. 
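Stripped of the cancel/resume detours, the upload sequence the test below walks through is roughly the following sketch against the BlobIngester interface used in this file (uploadBlob is a hypothetical helper, not part of the patch):

package example

import (
	"bytes"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// uploadBlob sketches the create/copy/commit happy path.
func uploadBlob(ctx context.Context, bs distribution.BlobIngester, p []byte) (distribution.Descriptor, error) {
	dgst, err := digest.FromBytes(p)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	wr, err := bs.Create(ctx) // opens a new upload session
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if _, err := io.Copy(wr, bytes.NewReader(p)); err != nil {
		return distribution.Descriptor{}, err
	}
	// Commit verifies the written content against the digest and
	// makes the blob available for Stat/Open.
	return wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
}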
+func TestSimpleBlobUpload(t *testing.T) { + randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + dgst := digest.Digest(tarSumStr) + if err != nil { + t.Fatalf("error allocating upload store: %v", err) + } + + ctx := context.Background() + imageName := "foo/bar" + driver := inmemory.New() + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + h := sha256.New() + rd := io.TeeReader(randomDataReader, h) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Cancel the upload then restart it + if err := blobUpload.Cancel(ctx); err != nil { + t.Fatalf("unexpected error during upload cancellation: %v", err) + } + + // Do a resume, get unknown upload + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) + } + + // Restart! + blobUpload, err = bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, rd) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("layer data write incomplete") + } + + offset, err := blobUpload.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("unexpected error seeking layer upload: %v", err) + } + + if offset != nn { + t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) + } + blobUpload.Close() + + // Do a resume, for good fun + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != nil { + t.Fatalf("unexpected error resuming upload: %v", err) + } + + sha256Digest := digest.NewDigest("sha256", h) + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // After finishing an upload, it should no longer exist. + if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { + t.Fatalf("expected layer upload to be unknown, got %v", err) + } + + // Test for existence. 
+	statDesc, err := bs.Stat(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
+	}
+
+	if statDesc != desc {
+		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
+	}
+
+	rc, err := bs.Open(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error opening blob for read: %v", err)
+	}
+	defer rc.Close()
+
+	h.Reset()
+	nn, err = io.Copy(h, rc)
+	if err != nil {
+		t.Fatalf("error reading layer: %v", err)
+	}
+
+	if nn != randomDataSize {
+		t.Fatalf("incorrect read length")
+	}
+
+	if digest.NewDigest("sha256", h) != sha256Digest {
+		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
+	}
+
+	// Delete a blob
+	err = bs.Delete(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("Unexpected error deleting blob")
+	}
+
+	d, err := bs.Stat(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type stat-ing deleted blob: %#v", err)
+	}
+
+	_, err = bs.Open(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected success opening deleted blob for read")
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type getting deleted blob: %#v", err)
+	}
+
+	// Re-upload the blob
+	randomBlob, err := ioutil.ReadAll(randomDataReader)
+	if err != nil {
+		t.Fatalf("Error reading all of blob %s", err.Error())
+	}
+	expectedDigest, err := digest.FromBytes(randomBlob)
+	if err != nil {
+		t.Fatalf("Error getting digest from bytes: %s", err)
+	}
+	simpleUpload(t, bs, randomBlob, expectedDigest)
+
+	d, err = bs.Stat(ctx, expectedDigest)
+	if err != nil {
+		t.Errorf("unexpected error stat-ing blob")
+	}
+	if d.Digest != expectedDigest {
+		t.Errorf("Mismatching digest with restored blob")
+	}
+
+	_, err = bs.Open(ctx, expectedDigest)
+	if err != nil {
+		t.Errorf("Unexpected error opening blob")
+	}
+
+	// Reuse state to test delete with a delete-disabled registry
+	registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false)
+	repository, err = registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs = repository.Blobs(ctx)
+	err = bs.Delete(ctx, desc.Digest)
+	if err == nil {
+		t.Errorf("Unexpected success deleting while disabled")
+	}
+}
+
+// TestSimpleBlobRead just creates a simple blob file and ensures that basic
+// open, read, seek, read works. More specific edge cases should be covered in
+// other tests.
+func TestSimpleBlobRead(t *testing.T) {
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false)
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
+	if err != nil {
+		t.Fatalf("error creating random data: %v", err)
+	}
+
+	dgst := digest.Digest(tarSumStr)
+
+	// Test for existence.
+ desc, err := bs.Stat(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when testing for existence: %v", err) + } + + rc, err := bs.Open(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when opening non-existent blob: %v", err) + } + + randomLayerSize, err := seekerSize(randomLayerReader) + if err != nil { + t.Fatalf("error getting seeker size for random layer: %v", err) + } + + descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} + t.Logf("desc: %v", descBefore) + + desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) + if err != nil { + t.Fatalf("error adding blob to blobservice: %v", err) + } + + if desc.Size != randomLayerSize { + t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) + } + + rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. + if err != nil { + t.Fatalf("error opening blob with %v: %v", dgst, err) + } + defer rc.Close() + + // Now check the sha digest and ensure its the same + h := sha256.New() + nn, err := io.Copy(h, rc) + if err != nil { + t.Fatalf("unexpected error copying to hash: %v", err) + } + + if nn != randomLayerSize { + t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) + } + + sha256Digest := digest.NewDigest("sha256", h) + if sha256Digest != desc.Digest { + t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) + } + + // Now seek back the blob, read the whole thing and check against randomLayerData + offset, err := rc.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking blob: %v", err) + } + + if offset != 0 { + t.Fatalf("seek failed: expected 0 offset, got %d", offset) + } + + p, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("error reading all of blob: %v", err) + } + + if len(p) != int(randomLayerSize) { + t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) + } + + // Reset the randomLayerReader and read back the buffer + _, err = randomLayerReader.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error resetting layer reader: %v", err) + } + + randomLayerData, err := ioutil.ReadAll(randomLayerReader) + if err != nil { + t.Fatalf("random layer read failed: %v", err) + } + + if !bytes.Equal(p, randomLayerData) { + t.Fatalf("layer data not equal") + } +} + +// TestLayerUploadZeroLength uploads zero-length +func TestLayerUploadZeroLength(t *testing.T) { + ctx := context.Background() + imageName := "foo/bar" + driver := inmemory.New() + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) +} + +func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { + ctx := context.Background() + wr, err := bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting upload: %v", err) + } + + nn, err := io.Copy(wr, bytes.NewReader(blob)) + if err != nil { + t.Fatalf("error copying into blob writer: %v", err) + } + + if nn != 0 { + t.Fatalf("unexpected number of bytes copied: %v > 0", nn) + } + + dgst, err := digest.FromReader(bytes.NewReader(blob)) + if err != nil { + t.Fatalf("error getting digest: %v", err) + 
}
+
+	if dgst != expectedDigest {
+		// sanity check on zero digest
+		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
+	}
+
+	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
+	if err != nil {
+		t.Fatalf("unexpected error committing write: %v", err)
+	}
+
+	if desc.Digest != dgst {
+		t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst)
+	}
+}
+
+// seekerSize seeks to the end of seeker, checks the size and returns it to
+// the original state, returning the size. The state of the seeker should be
+// treated as unknown if an error is returned.
+func seekerSize(seeker io.ReadSeeker) (int64, error) {
+	current, err := seeker.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return 0, err
+	}
+
+	end, err := seeker.Seek(0, os.SEEK_END)
+	if err != nil {
+		return 0, err
+	}
+
+	resumed, err := seeker.Seek(current, os.SEEK_SET)
+	if err != nil {
+		return 0, err
+	}
+
+	if resumed != current {
+		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
+	}
+
+	return end, nil
+}
+
+// addBlob simply consumes the reader and inserts into the blob service,
+// returning a descriptor on success.
+func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) {
+	wr, err := bs.Create(ctx)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer wr.Cancel(ctx)
+
+	if nn, err := io.Copy(wr, rd); err != nil {
+		return distribution.Descriptor{}, err
+	} else if nn != desc.Size {
+		return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
+	}
+
+	return wr.Commit(ctx, desc)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go
new file mode 100644
index 00000000..fad0a77a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go
@@ -0,0 +1,60 @@
+package storage
+
+import (
+	"expvar"
+	"sync/atomic"
+
+	"github.com/docker/distribution/registry/storage/cache"
+)
+
+type blobStatCollector struct {
+	metrics cache.Metrics
+}
+
+func (bsc *blobStatCollector) Hit() {
+	atomic.AddUint64(&bsc.metrics.Requests, 1)
+	atomic.AddUint64(&bsc.metrics.Hits, 1)
+}
+
+func (bsc *blobStatCollector) Miss() {
+	atomic.AddUint64(&bsc.metrics.Requests, 1)
+	atomic.AddUint64(&bsc.metrics.Misses, 1)
+}
+
+func (bsc *blobStatCollector) Metrics() cache.Metrics {
+	return bsc.metrics
+}
+
+// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
+// cache requests. Note this is kept globally and made available via expvar.
+// For more detailed metrics, it's recommended to instrument a particular cache
+// implementation.
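Because these counters are published through expvar (see the init function below), they can be read back wherever the process exposes the standard /debug/vars handler. A hypothetical probe, assuming the handler is reachable at localhost:5000 and that cache.Metrics serializes its counters under their Go field names:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumes the registry process exposes net/http's expvar handler;
	// the address is a placeholder.
	resp, err := http.Get("http://localhost:5000/debug/vars")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the branch published below:
	// registry -> cache -> storage -> blobdescriptor.
	var vars struct {
		Registry struct {
			Cache struct {
				Storage struct {
					BlobDescriptor struct {
						Requests, Hits, Misses uint64
					} `json:"blobdescriptor"`
				} `json:"storage"`
			} `json:"cache"`
		} `json:"registry"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", vars.Registry.Cache.Storage.BlobDescriptor)
}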
+var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
+
+func init() {
+	registry := expvar.Get("registry")
+	if registry == nil {
+		registry = expvar.NewMap("registry")
+	}
+
+	cache := registry.(*expvar.Map).Get("cache")
+	if cache == nil {
+		cache = &expvar.Map{}
+		cache.(*expvar.Map).Init()
+		registry.(*expvar.Map).Set("cache", cache)
+	}
+
+	storage := cache.(*expvar.Map).Get("storage")
+	if storage == nil {
+		storage = &expvar.Map{}
+		storage.(*expvar.Map).Init()
+		cache.(*expvar.Map).Set("storage", storage)
+	}
+
+	storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
+		// no need for synchronous access: the increments are atomic and
+		// during reading, we don't care if the data is up to date. The
+		// numbers will always *eventually* be reported correctly.
+		return blobStatterCacheMetrics
+	}))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go
new file mode 100644
index 00000000..24aeba69
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go
@@ -0,0 +1,79 @@
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// TODO(stevvooe): This should be configurable in the future.
+const blobCacheControlMaxAge = 365 * 24 * time.Hour
+
+// blobServer simply serves blobs from a driver instance using a path function
+// to identify paths and a descriptor service to fill in metadata.
+type blobServer struct {
+	driver   driver.StorageDriver
+	statter  distribution.BlobStatter
+	pathFn   func(dgst digest.Digest) (string, error)
+	redirect bool // allows disabling URLFor redirects
+}
+
+func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	path, err := bs.pathFn(desc.Digest)
+	if err != nil {
+		return err
+	}
+
+	redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
+
+	switch err {
+	case nil:
+		if bs.redirect {
+			// Redirect to storage URL.
+			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
+			return err
+		}
+
+		fallthrough
+	case driver.ErrUnsupportedMethod:
+		// Fallback to serving the content directly.
+		br, err := newFileReader(ctx, bs.driver, path, desc.Size)
+		if err != nil {
+			return err
+		}
+		defer br.Close()
+
+		w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
+		w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
+
+		if w.Header().Get("Docker-Content-Digest") == "" {
+			w.Header().Set("Docker-Content-Digest", desc.Digest.String())
+		}
+
+		if w.Header().Get("Content-Type") == "" {
+			// Set the content type if not already set.
+			w.Header().Set("Content-Type", desc.MediaType)
+		}
+
+		if w.Header().Get("Content-Length") == "" {
+			// Set the content length if not already set.
+			w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
+		}
+
+		http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
+		return nil
+	}
+
+	// Some unexpected error.
+ return err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go new file mode 100644 index 00000000..724617f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go @@ -0,0 +1,198 @@ +package storage + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// blobStore implements the read side of the blob store interface over a +// driver without enforcing per-repository membership. This object is +// intentionally a leaky abstraction, providing utility methods that support +// creating and traversing backend links. +type blobStore struct { + driver driver.StorageDriver + pm *pathMapper + statter distribution.BlobStatter +} + +var _ distribution.BlobProvider = &blobStore{} + +// Get implements the BlobReadService.Get call. +func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + bp, err := bs.path(dgst) + if err != nil { + return nil, err + } + + p, err := bs.driver.GetContent(ctx, bp) + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUnknown + } + + return nil, err + } + + return p, err +} + +func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + path, err := bs.path(desc.Digest) + if err != nil { + return nil, err + } + + return newFileReader(ctx, bs.driver, path, desc.Size) +} + +// Put stores the content p in the blob store, calculating the digest. If the +// content is already present, only the digest will be returned. This should +// only be used for small objects, such as manifests. This implemented as a convenience for other Put implementations +func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst, err := digest.FromBytes(p) + if err != nil { + context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p)) + return distribution.Descriptor{}, err + } + + desc, err := bs.statter.Stat(ctx, dgst) + if err == nil { + // content already present + return desc, nil + } else if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err) + // real error, return it + return distribution.Descriptor{}, err + } + + bp, err := bs.path(dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype here, as well. + + return distribution.Descriptor{ + Size: int64(len(p)), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, bs.driver.PutContent(ctx, bp, p) +} + +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func (bs *blobStore) path(dgst digest.Digest) (string, error) { + bp, err := bs.pm.path(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return "", err + } + + return bp, nil +} + +// link links the path to the provided digest by writing the digest into the +// target file. 
Caller must ensure that the blob actually exists. +func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. + return bs.driver.PutContent(ctx, path, []byte(dgst)) +} + +// readlink returns the linked digest at path. +func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { + content, err := bs.driver.GetContent(ctx, path) + if err != nil { + return "", err + } + + linked, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return linked, nil +} + +// resolve reads the digest link at path and returns the blob store path. +func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { + dgst, err := bs.readlink(ctx, path) + if err != nil { + return "", err + } + + return bs.path(dgst) +} + +type blobStatter struct { + driver driver.StorageDriver + pm *pathMapper +} + +var _ distribution.BlobDescriptorService = &blobStatter{} + +// Stat implements BlobStatter.Stat by returning the descriptor for the blob +// in the main blob store. If this method returns successfully, there is +// strong guarantee that the blob exists and is available. +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + path, err := bs.pm.path(blobDataPathSpec{ + digest: dgst, + }) + if err != nil { + return distribution.Descriptor{}, err + } + + fi, err := bs.driver.Stat(ctx, path) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + } + + if fi.IsDir() { + // NOTE(stevvooe): This represents a corruption situation. Somehow, we + // calculated a blob path and then detected a directory. We log the + // error and then error on the side of not knowing about the blob. + context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + // TODO(stevvooe): Add method to resolve the mediatype. We can store and + // cache a "global" media type for the blob, even if a specific repo has a + // mediatype that overrides the main one. + + return distribution.Descriptor{ + Size: fi.Size(), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. 
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, nil
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	return distribution.ErrUnsupported
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	return distribution.ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go
new file mode 100644
index 00000000..2142c37f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go
@@ -0,0 +1,379 @@
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+var (
+	errResumableDigestNotAvailable = errors.New("resumable digest not available")
+)
+
+// blobWriter is used to control the various aspects of resumable
+// blob upload. It implements the distribution.BlobWriter interface.
+type blobWriter struct {
+	blobStore *linkedBlobStore
+
+	id        string
+	startedAt time.Time
+	digester  digest.Digester
+	written   int64 // track the contiguous write
+
+	// implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy
+	// the distribution.BlobWriter interface
+	bufferedFileWriter
+
+	resumableDigestEnabled bool
+}
+
+var _ distribution.BlobWriter = &blobWriter{}
+
+// ID returns the identifier for this upload.
+func (bw *blobWriter) ID() string {
+	return bw.id
+}
+
+func (bw *blobWriter) StartedAt() time.Time {
+	return bw.startedAt
+}
+
+// Commit marks the upload as completed, returning a valid descriptor. The
+// final size and digest are checked against the first descriptor provided.
+func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	context.GetLogger(ctx).Debug("(*blobWriter).Commit")
+
+	if err := bw.bufferedFileWriter.Close(); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	canonical, err := bw.validateBlob(ctx, desc)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.moveBlob(ctx, canonical); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.removeResources(ctx); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	return canonical, nil
+}
+
+// Cancel rolls back the blob upload process, releasing any resources
+// associated with the writer and canceling the operation.
+func (bw *blobWriter) Cancel(ctx context.Context) error {
+	context.GetLogger(ctx).Debug("(*blobWriter).Cancel")
+	if err := bw.removeResources(ctx); err != nil {
+		return err
+	}
+
+	bw.Close()
+	return nil
+}
+
+func (bw *blobWriter) Write(p []byte) (int, error) {
+	// Ensure that the current write offset matches how many bytes have been
+	// written to the digester. If not, we need to update the digest state to
+	// match the current write position.
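+	//
+	// (Editorial worked example: after a client resumes an upload at byte
+	// 4096, bw.offset is 4096 while a freshly constructed digester has
+	// hashed zero bytes; resumeDigestAt below reloads the nearest stored
+	// hash state at or below 4096 and re-hashes the remaining gap from the
+	// upload file, keeping hashing and writing in lockstep.)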
+	if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable {
+		return 0, err
+	}
+
+	n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p)
+	bw.written += int64(n)
+
+	return n, err
+}
+
+func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
+	// Ensure that the current write offset matches how many bytes have been
+	// written to the digester. If not, we need to update the digest state to
+	// match the current write position.
+	if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable {
+		return 0, err
+	}
+
+	nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash()))
+	bw.written += nn
+
+	return nn, err
+}
+
+func (bw *blobWriter) Close() error {
+	if bw.err != nil {
+		return bw.err
+	}
+
+	if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
+		return err
+	}
+
+	return bw.bufferedFileWriter.Close()
+}
+
+// validateBlob checks the data against the digest, returning an error if it
+// does not match. The canonical descriptor is returned.
+func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	var (
+		verified, fullHash bool
+		canonical          digest.Digest
+	)
+
+	if desc.Digest == "" {
+		// if no descriptors are provided, we have nothing to validate
+		// against. We don't really want to support this for the registry.
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Reason: fmt.Errorf("cannot validate against empty digest"),
+		}
+	}
+
+	// Stat the on disk file
+	if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): We really don't care if the file is
+			// not actually present for the reader. We now assume
+			// that the desc length is zero.
+			desc.Size = 0
+		default:
+			// Any other error we want propagated up the stack.
+			return distribution.Descriptor{}, err
+		}
+	} else {
+		if fi.IsDir() {
+			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
+		}
+
+		bw.size = fi.Size()
+	}
+
+	if desc.Size > 0 {
+		if desc.Size != bw.size {
+			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
+		}
+	} else {
+		// if provided 0 or negative length, we can assume caller doesn't know or
+		// care about length.
+		desc.Size = bw.size
+	}
+
+	// TODO(stevvooe): This section is very meandering. Need to be broken down
+	// to be a lot more clear.
+
+	if err := bw.resumeDigestAt(ctx, bw.size); err == nil {
+		canonical = bw.digester.Digest()
+
+		if canonical.Algorithm() == desc.Digest.Algorithm() {
+			// Common case: client and server prefer the same canonical digest
+			// algorithm - currently SHA256.
+			verified = desc.Digest == canonical
+		} else {
+			// The client wants to use a different digest algorithm. They'll just
+			// have to be patient and wait for us to download and re-hash the
+			// uploaded content using that digest algorithm.
+			fullHash = true
+		}
+	} else if err == errResumableDigestNotAvailable {
+		// Not using resumable digests, so we need to hash the entire layer.
+		fullHash = true
+	} else {
+		return distribution.Descriptor{}, err
+	}
+
+	if fullHash {
+		// a fantastic optimization: if the written data and the size are
+		// the same, we don't need to read the data from the backend. This is
+		// because we've written the entire file in the lifecycle of the
+		// current instance.
+		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
+			canonical = bw.digester.Digest()
+			verified = desc.Digest == canonical
+		}
+
+		// If the check based on size fails, we fall back to the slowest of
+		// paths. We may be able to make the size-based check a stronger
+		// guarantee, so this may be defensive.
+		if !verified {
+			digester := digest.Canonical.New()
+
+			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			// Read the file from the backend driver and validate it.
+			fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			tr := io.TeeReader(fr, digester.Hash())
+
+			if _, err := io.Copy(digestVerifier, tr); err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			canonical = digester.Digest()
+			verified = digestVerifier.Verified()
+		}
+	}
+
+	if !verified {
+		context.GetLoggerWithFields(ctx,
+			map[string]interface{}{
+				"canonical": canonical,
+				"provided":  desc.Digest,
+			}, "canonical", "provided").
+			Errorf("canonical digest does not match provided digest")
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Digest: desc.Digest,
+			Reason: fmt.Errorf("content does not match digest"),
+		}
+	}
+
+	// update desc with canonical hash
+	desc.Digest = canonical
+
+	if desc.MediaType == "" {
+		desc.MediaType = "application/octet-stream"
+	}
+
+	return desc, nil
+}
+
+// moveBlob moves the data into its final, hash-qualified destination,
+// identified by dgst. The layer should be validated before commencing the
+// move.
+func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
+	blobPath, err := bw.blobStore.pm.path(blobDataPathSpec{
+		digest: desc.Digest,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Check for existence
+	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // ensure that it doesn't exist.
+		default:
+			return err
+		}
+	} else {
+		// If the path exists, we can assume that the content has already
+		// been uploaded, since the blob storage is content-addressable.
+		// While it may be corrupted, detection of such corruption belongs
+		// elsewhere.
+		return nil
+	}
+
+	// If no data was received, we may not actually have a file on disk. Check
+	// the size here and write a zero-length file to blobPath if this is the
+	// case. For the most part, this should only ever happen with zero-length
+	// tars.
+	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// HACK(stevvooe): This is slightly dangerous: if we verify above,
+			// get a hash, then the underlying file is deleted, we risk moving
+			// a zero-length blob into a nonzero-length blob location. To
+			// prevent this horrid thing, we employ the hack of only allowing
+			// this to happen for the zero tarsum.
+			if desc.Digest == digest.DigestSha256EmptyTar {
+				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
+			}
+
+			// We let this fail during the move below.
+			logrus.
+				WithField("upload.id", bw.ID()).
+ WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") + default: + return err // unrelated error + } + } + + // TODO(stevvooe): We should also write the mediatype when executing this move. + + return bw.blobStore.driver.Move(ctx, bw.path, blobPath) +} + +// removeResources should clean up all resources associated with the upload +// instance. An error will be returned if the clean up cannot proceed. If the +// resources are already not present, no error will be returned. +func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 00000000..39166876 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. 
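+//
+// Editorial note: this file is compiled only when the "noresumabledigest"
+// build tag is set (see the build constraint at the top of the file), e.g.:
+//
+//	go build -tags noresumabledigest ./...
+//
+// Without the tag, the resumable implementations in blobwriter_resumable.go
+// are used instead.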
+func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go new file mode 100644 index 00000000..a26ac2cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -0,0 +1,175 @@ +// +build !noresumabledigest + +package storage + +import ( + "fmt" + "io" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigestAt attempts to restore the state of the internal hash function +// by loading the most recent saved hash state less than or equal to the given +// offset. Any unhashed bytes remaining less than the given offset are hashed +// from the content uploaded so far. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + if offset < 0 { + return fmt.Errorf("cannot resume hash at negative offset: %d", offset) + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset less than or equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { + // This offset is closer to the requested offset. + hashStateMatch = hashState + } else if hashState.offset > offset { + // Remove any stored hash state with offsets higher than this one + // as writes to this resumed hasher will make those invalid. This + // is probably okay to skip for now since we don't expect anyone to + // use the API in this way. For that reason, we don't treat an + // an error here as a fatal error, but only log it. + if err := bw.driver.Delete(ctx, hashState.path); err != nil { + logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) + } + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(h.Len()); gapLen > 0 { + // Need to read content from the upload to catch up to the desired offset. 
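+		//
+		// (Editorial worked example: with a stored hash state covering 8192
+		// bytes and a requested offset of 12288, gapLen is 4096; those bytes
+		// are read back from the upload file at bw.path and fed through the
+		// hash below.)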
+ fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) + if err != nil { + return err + } + + if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { + return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) + } + + if _, err := io.CopyN(h, fr, gapLen); err != nil { + return err + } + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 00000000..10a39091 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. 
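+//
+// For instance (editorial sketch), a descriptor must carry a well-formed
+// digest, a non-negative size and a non-empty mediatype to be admitted:
+//
+//	err := ValidateDescriptor(distribution.Descriptor{
+//		Digest:    "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+//		Size:      0,
+//		MediaType: "application/octet-stream",
+//	}) // err == nil; clearing MediaType would make it fail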
+func ValidateDescriptor(desc distribution.Descriptor) error {
+	if err := desc.Digest.Validate(); err != nil {
+		return err
+	}
+
+	if desc.Size < 0 {
+		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
+	}
+
+	if desc.MediaType == "" {
+		return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
new file mode 100644
index 00000000..94ca8a90
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -0,0 +1,101 @@
+package cache
+
+import (
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+
+	"github.com/docker/distribution"
+)
+
+// Metrics is used to hold metric counters
+// related to the number of times a cache was
+// hit or missed.
+type Metrics struct {
+	Requests uint64
+	Hits     uint64
+	Misses   uint64
+}
+
+// MetricsTracker represents a metric tracker
+// which simply counts the number of hits and misses.
+type MetricsTracker interface {
+	Hit()
+	Miss()
+	Metrics() Metrics
+}
+
+type cachedBlobStatter struct {
+	cache   distribution.BlobDescriptorService
+	backend distribution.BlobDescriptorService
+	tracker MetricsTracker
+}
+
+// NewCachedBlobStatter creates a new statter which prefers a cache and
+// falls back to a backend.
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+	return &cachedBlobStatter{
+		cache:   cache,
+		backend: backend,
+	}
+}
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache
+// and falls back to a backend. Hits and misses will be sent to the tracker.
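+//
+// A minimal wiring sketch from a consuming package (editorial;
+// backendStatter and tracker are hypothetical values supplied by the
+// caller):
+//
+//	statter := cache.NewCachedBlobStatterWithMetrics(
+//		memory.NewInMemoryBlobDescriptorCacheProvider(), // cache
+//		backendStatter, // any distribution.BlobDescriptorService
+//		tracker,        // any MetricsTracker implementation
+//	)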
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 00000000..120a6572 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,170 @@ +package memory + +import ( + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" +) + +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex +} + +// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for +// storing blob descriptor data. 
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if err := v2.ValidateRepositoryName(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if rsimbdcp.repository == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + if rsimbdcp.repository == nil { + return distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if rsimbdcp.repository == nil { + // allocate map since we are setting it now. + rsimbdcp.parent.mu.Lock() + var ok bool + // have to read back value since we may have allocated elsewhere. + rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + rsimbdcp.repository = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository + } + + rsimbdcp.parent.mu.Unlock() + } + + if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
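+//
+// Usage sketch (editorial; ctx, dgst and desc are assumed to be in scope):
+//
+//	c := newMapBlobDescriptorCache()
+//	if err := c.SetDescriptor(ctx, dgst, desc); err != nil { ... } // validates first
+//	d, err := c.Stat(ctx, dgst) // d == desc on a hit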
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go new file mode 100644 index 00000000..3bae7ccb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go @@ -0,0 +1,13 @@ +package memory + +import ( + "testing" + + "github.com/docker/distribution/registry/storage/cache" +) + +// TestInMemoryBlobInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryBlobInfoCache(t *testing.T) { + cache.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go new file mode 100644 index 00000000..36370bdd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -0,0 +1,268 @@ +package redis + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" + "github.com/garyburd/redigo/redis" +) + +// redisBlobStatService provides an implementation of +// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in +// two parts. The first provide fast access to repository membership through a +// redis set for each repo. The second is a redis hash keyed by the digest of +// the layer, providing path, length and mediatype information. There is also +// a per-repository redis hash of the blob descriptor, allowing override of +// data. This is currently used to override the mediatype on a per-repository +// basis. +// +// Note that there is no implied relationship between these two caches. The +// layer may exist in one, both or none and the code must be written this way. 
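+//
+// Concretely (editorial summary of the key helpers later in this file), the
+// redis keys have the following shapes:
+//
+//	blobs::<digest>                        global descriptor hash
+//	repository::<name>::blobs             per-repository membership set
+//	repository::<name>::blobs::<digest>   per-repository override hash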
+type redisBlobDescriptorService struct { + pool *redis.Pool + + // TODO(stevvooe): We use a pool because we don't have great control over + // the cache lifecycle to manage connections. A new connection if fetched + // for each operation. Once we have better lifecycle management of the + // request objects, we can change this to a connection. +} + +// NewRedisBlobDescriptorCacheProvider returns a new redis-based +// BlobDescriptorCacheProvider using the provided redis connection pool. +func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { + return &redisBlobDescriptorService{ + pool: pool, + } +} + +// RepositoryScoped returns the scoped cache. +func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if err := v2.ValidateRepositoryName(repo); err != nil { + return nil, err + } + + return &repositoryScopedRedisBlobDescriptorService{ + repo: repo, + upstream: rbds, + }, nil +} + +// Stat retrieves the descriptor data from the redis hash entry. +func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.stat(ctx, conn, dgst) +} + +func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + // Not atomic in redis <= 2.3 + reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") + if err != nil { + return err + } + + if reply == 0 { + return distribution.ErrBlobUnknown + } + + return nil +} + +// stat provides an internal stat call that takes a connection parameter. This +// allows some internal management of the connection scope. +func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. + if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err + } + + return desc, nil +} + +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. 
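+//
+// In redis-cli terms (editorial illustration of the commands issued by
+// setDescriptor below):
+//
+//	HMSET  blobs::<digest> digest <digest> size <size>
+//	HSETNX blobs::<digest> mediatype <mediatype>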
+func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "size", desc.Size); err != nil { + return err + } + + // Only set mediatype if not already set. + if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil +} + +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. 
+	mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if mediatype != "" {
+		upstream.MediaType = mediatype
+	}
+
+	return upstream, nil
+}
+
+// Clear removes the descriptor from the cache and forwards to the upstream
+// descriptor store.
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
+	if err := dgst.Validate(); err != nil {
+		return err
+	}
+
+	conn := rsrbds.upstream.pool.Get()
+	defer conn.Close()
+
+	// Check membership to repository first
+	member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
+	if err != nil {
+		return err
+	}
+
+	if !member {
+		return distribution.ErrBlobUnknown
+	}
+
+	return rsrbds.upstream.Clear(ctx, dgst)
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	if err := dgst.Validate(); err != nil {
+		return err
+	}
+
+	if err := cache.ValidateDescriptor(desc); err != nil {
+		return err
+	}
+
+	if dgst != desc.Digest {
+		if dgst.Algorithm() == desc.Digest.Algorithm() {
+			return fmt.Errorf("redis cache: digest for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest)
+		}
+	}
+
+	conn := rsrbds.upstream.pool.Get()
+	defer conn.Close()
+
+	return rsrbds.setDescriptor(ctx, conn, dgst, desc)
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
+	if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil {
+		return err
+	}
+
+	if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil {
+		return err
+	}
+
+	// Override repository mediatype.
+	if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil {
+		return err
+	}
+
+	// Also set the values for the primary descriptor, if they differ by
+	// algorithm (i.e. sha256 vs tarsum).
+	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
+		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
+	return "repository::" + rsrbds.repo + "::blobs::" + dgst.String()
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string {
+	return "repository::" + rsrbds.repo + "::blobs"
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go
new file mode 100644
index 00000000..ed6944a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go
@@ -0,0 +1,51 @@
+package redis
+
+import (
+	"flag"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/garyburd/redigo/redis"
+)
+
+var redisAddr string
+
+func init() {
+	flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis")
+}
+
+// TestRedisBlobDescriptorCacheProvider exercises a live redis instance using
+// the cache implementation.
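+//
+// Editorial note: to exercise this against a local redis instance, pass the
+// flag registered in init above, or set the fallback environment variable:
+//
+//	go test -test.registry.storage.cache.redis.addr=localhost:6379
+//	TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR=localhost:6379 go test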
+func TestRedisBlobDescriptorCacheProvider(t *testing.T) {
+	if redisAddr == "" {
+		// fall back to an environment variable
+		redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR")
+	}
+
+	if redisAddr == "" {
+		// skip if still not set
+		t.Skip("please set -test.registry.storage.cache.redis.addr to test the blob descriptor cache against redis")
+	}
+
+	pool := &redis.Pool{
+		Dial: func() (redis.Conn, error) {
+			return redis.Dial("tcp", redisAddr)
+		},
+		MaxIdle:   1,
+		MaxActive: 2,
+		TestOnBorrow: func(c redis.Conn, t time.Time) error {
+			_, err := c.Do("PING")
+			return err
+		},
+		Wait: false, // if a connection is not available, proceed without cache.
+	}
+
+	// Clear the database
+	if _, err := pool.Get().Do("FLUSHDB"); err != nil {
+		t.Fatalf("unexpected error flushing redis db: %v", err)
+	}
+
+	cache.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go
new file mode 100644
index 00000000..b5a2f643
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go
@@ -0,0 +1,178 @@
+package cache
+
+import (
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+// CheckBlobDescriptorCache takes a cache implementation through a common set
+// of operations. If adding new tests, please add them here so new
+// implementations get the benefit. This should be used for unit tests.
+func CheckBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) {
+	ctx := context.Background()
+
+	checkBlobDescriptorCacheEmptyRepository(t, ctx, provider)
+	checkBlobDescriptorCacheSetAndRead(t, ctx, provider)
+}
+
+func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) {
+	if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown {
+		t.Fatalf("expected unknown blob error with empty store: %v", err)
+	}
+
+	cache, err := provider.RepositoryScoped("")
+	if err == nil {
+		t.Fatalf("expected an error when asking for invalid repo")
+	}
+
+	cache, err = provider.RepositoryScoped("foo/bar")
+	if err != nil {
+		t.Fatalf("unexpected error getting repository: %v", err)
+	}
+
+	if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
+		Digest:    "sha384:abc",
+		Size:      10,
+		MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
+		t.Fatalf("expected error with invalid digest: %v", err)
+	}
+
+	if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{
+		Digest:    "",
+		Size:      10,
+		MediaType: "application/octet-stream"}); err == nil {
+		t.Fatalf("expected error setting value on invalid descriptor")
+	}
+
+	if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat {
+		t.Fatalf("expected error checking for cache item with empty digest: %v", err)
+	}
+
+	if _, err := cache.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown {
+		t.Fatalf("expected unknown blob error with empty repo: %v", err)
+	}
+}
+
+func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) {
+	localDigest := digest.Digest("sha384:abc")
+	expected := distribution.Descriptor{
+		Digest:    "sha256:abc",
+		Size:      10,
+		MediaType: "application/octet-stream"}
+
+	cache, err := provider.RepositoryScoped("foo/bar")
+	if err != nil {
t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // also check that we set the canonical key ("fake:abc") + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("descriptor not returned for canonical key: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // ensure that global gets extra descriptor mapping + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // get at it through canonical descriptor + desc, err = provider.Stat(ctx, expected.Digest) + if err != nil { + t.Fatalf("unexpected error checking glboal descriptor: %v", err) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // now, we set the repo local mediatype to something else and ensure it + // doesn't get changed in the provider cache. + expected.MediaType = "application/json" + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("unexpected error setting descriptor: %v", err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting descriptor: %v", err) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } + + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting global descriptor: %v", err) + } + + expected.MediaType = "application/octet-stream" // expect original mediatype in global + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } +} + +func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc") + expected := distribution.Descriptor{ + Digest: "sha256:abc", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + err = cache.Clear(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error deleting descriptor") + } + + nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + err = cache.Clear(ctx, nonExistantDigest) + if err == nil { + t.Fatalf("expected error deleting unknown descriptor") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go new file mode 100644 index 00000000..470894b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go @@ -0,0 
+package storage
+
+import (
+	"errors"
+	"io"
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// Repositories returns a list, or partial list, of repositories in the
+// registry. Because it is quite an expensive operation, it should only be
+// used when building up an initial set of repositories.
+func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
+	var foundRepos []string
+	var errVal error
+
+	if len(repos) == 0 {
+		return 0, errors.New("no space in slice")
+	}
+
+	root, err := defaultPathMapper.path(repositoriesRootPathSpec{})
+	if err != nil {
+		return 0, err
+	}
+
+	// Walk each of the directories in our storage. Unfortunately, since
+	// there's no guarantee that storage will return files in lexicographical
+	// order, we have to store everything in another slice, sort it and then
+	// copy it back to the passed-in slice.
+
+	Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
+		filePath := fileInfo.Path()
+
+		// lop the base path off
+		repoPath := filePath[len(root)+1:]
+
+		_, file := path.Split(repoPath)
+		if file == "_layers" {
+			repoPath = strings.TrimSuffix(repoPath, "/_layers")
+			if repoPath > last {
+				foundRepos = append(foundRepos, repoPath)
+			}
+			return ErrSkipDir
+		} else if strings.HasPrefix(file, "_") {
+			return ErrSkipDir
+		}
+
+		return nil
+	})
+
+	sort.Strings(foundRepos)
+	n = copy(repos, foundRepos)
+
+	// Signal that we have no more entries by setting EOF
+	if len(foundRepos) <= len(repos) {
+		errVal = io.EOF
+	}
+
+	return n, errVal
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go
new file mode 100644
index 00000000..1a1dbac5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go
@@ -0,0 +1,122 @@
+package storage
+
+import (
+	"io"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+type setupEnv struct {
+	ctx      context.Context
+	driver   driver.StorageDriver
+	expected []string
+	registry distribution.Namespace
+}
+
+func setupFS(t *testing.T) *setupEnv {
+	d := inmemory.New()
+	c := []byte("")
+	ctx := context.Background()
+	registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false)
+	rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{})
+
+	repos := []string{
+		"/foo/a/_layers/1",
+		"/foo/b/_layers/2",
+		"/bar/c/_layers/3",
+		"/bar/d/_layers/4",
+		"/foo/d/in/_layers/5",
+		"/an/invalid/repo",
+		"/bar/d/_layers/ignored/dir/6",
+	}
+
+	for _, repo := range repos {
+		if err := d.PutContent(ctx, rootpath+repo, c); err != nil {
+			t.Fatalf("unable to put to inmemory fs: %v", err)
+		}
+	}
+
+	expected := []string{
+		"bar/c",
+		"bar/d",
+		"foo/a",
+		"foo/b",
+		"foo/d/in",
+	}
+
+	return &setupEnv{
+		ctx:      ctx,
+		driver:   d,
+		expected: expected,
+		registry: registry,
+	}
+}
+
+func TestCatalog(t *testing.T) {
+	env := setupFS(t)
+
+	p := make([]string, 50)
+
+	numFilled, err := env.registry.Repositories(env.ctx, p, "")
+
+	if !testEq(p, env.expected, numFilled) {
+		t.Errorf("catalog repositories did not match expected")
+	}
+
+	if err != io.EOF {
+
t.Errorf("Catalog has more values which we aren't expecting") + } +} + +func TestCatalogInParts(t *testing.T) { + env := setupFS(t) + + chunkLen := 2 + p := make([]string, chunkLen) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[0:chunkLen], numFilled) { + t.Errorf("Expected catalog first chunk err") + } + + lastRepo := p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { + t.Errorf("Expected catalog second chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } + + if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) { + t.Errorf("Expected catalog third chunk err") + } + +} + +func testEq(a, b []string, size int) bool { + for cnt := 0; cnt < size-1; cnt++ { + if a[cnt] != b[cnt] { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go new file mode 100644 index 00000000..387d9234 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. It should be considered an internal package, as of Go 1.4. +package storage diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go new file mode 100644 index 00000000..cbb95981 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -0,0 +1,366 @@ +// Package azure provides a storagedriver.StorageDriver implementation to +// store blobs in Microsoft Azure Blob Storage Service. +package azure + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +const driverName = "azure" + +const ( + paramAccountName = "accountname" + paramAccountKey = "accountkey" + paramContainer = "container" + paramRealm = "realm" +) + +type driver struct { + client azure.BlobStorageClient + container string +} + +type baseEmbed struct{ base.Base } + +// Driver is a storagedriver.StorageDriver implementation backed by +// Microsoft Azure Blob Storage Service. +type Driver struct{ baseEmbed } + +func init() { + factory.Register(driverName, &azureDriverFactory{}) +} + +type azureDriverFactory struct{} + +func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// FromParameters constructs a new Driver with a given parameters map. 
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	accountName, ok := parameters[paramAccountName]
+	if !ok || fmt.Sprint(accountName) == "" {
+		return nil, fmt.Errorf("No %s parameter provided", paramAccountName)
+	}
+
+	accountKey, ok := parameters[paramAccountKey]
+	if !ok || fmt.Sprint(accountKey) == "" {
+		return nil, fmt.Errorf("No %s parameter provided", paramAccountKey)
+	}
+
+	container, ok := parameters[paramContainer]
+	if !ok || fmt.Sprint(container) == "" {
+		return nil, fmt.Errorf("No %s parameter provided", paramContainer)
+	}
+
+	realm, ok := parameters[paramRealm]
+	if !ok || fmt.Sprint(realm) == "" {
+		realm = azure.DefaultBaseURL
+	}
+
+	return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm))
+}
+
+// New constructs a new Driver with the given Azure Storage Account credentials
+func New(accountName, accountKey, container, realm string) (*Driver, error) {
+	api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true)
+	if err != nil {
+		return nil, err
+	}
+
+	blobClient := api.GetBlobService()
+
+	// Create the registry container if it does not already exist
+	if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil {
+		return nil, err
+	}
+
+	d := &driver{
+		client:    blobClient,
+		container: container}
+	return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
+}
+
+// Implement the storagedriver.StorageDriver interface.
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+	blob, err := d.client.GetBlob(d.container, path)
+	if err != nil {
+		if is404(err) {
+			return nil, storagedriver.PathNotFoundError{Path: path}
+		}
+		return nil, err
+	}
+	defer blob.Close() // GetBlob returns an io.ReadCloser that must be closed
+
+	return ioutil.ReadAll(blob)
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+	if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil {
+		return err
+	}
+	if err := d.client.CreateBlockBlob(d.container, path); err != nil {
+		return err
+	}
+	bs := newAzureBlockStorage(d.client)
+	bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize)
+	_, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents))
+	return err
+}
+
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	if ok, err := d.client.BlobExists(d.container, path); err != nil {
+		return nil, err
+	} else if !ok {
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+
+	info, err := d.client.GetBlobProperties(d.container, path)
+	if err != nil {
+		return nil, err
+	}
+
+	size := int64(info.ContentLength)
+	if offset >= size {
+		return ioutil.NopCloser(bytes.NewReader(nil)), nil
+	}
+
+	bytesRange := fmt.Sprintf("%v-", offset)
+	resp, err := d.client.GetBlobRange(d.container, path, bytesRange)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// WriteStream stores the contents of the provided io.Reader at a location
+// designated by the given path.
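+// Writes that begin past the blob's current size are zero-filled up to the
+// requested offset (see zeroFillWriter), so the stored blob stays contiguous.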
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { + if blobExists, err := d.client.BlobExists(d.container, path); err != nil { + return 0, err + } else if !blobExists { + err := d.client.CreateBlockBlob(d.container, path) + if err != nil { + return 0, err + } + } + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + bs := newAzureBlockStorage(d.client) + bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) + zw := newZeroFillWriter(&bw) + return zw.Write(d.container, path, offset, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // Check if the path is a blob + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if ok { + blob, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + mtim, err := time.Parse(http.TimeFormat, blob.LastModified) + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(blob.ContentLength), + ModTime: mtim, + IsDir: false, + }}, nil + } + + // Check if path is a virtual container + virtContainerPath := path + if !strings.HasSuffix(virtContainerPath, "/") { + virtContainerPath += "/" + } + blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Prefix: virtContainerPath, + MaxResults: 1, + }) + if err != nil { + return nil, err + } + if len(blobs.Blobs) > 0 { + // path is a virtual container + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + }}, nil + } + + // path is not a blob or virtual container + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path == "/" { + path = "" + } + + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return blobs, err + } + + list := directDescendants(blobs, path) + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) + err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + if err != nil { + if is404(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err + } + + return d.client.DeleteBlob(d.container, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
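+// If "path" names a single blob it is removed directly; otherwise it is
+// treated as a virtual container and every blob beneath it is deleted.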
+func (d *driver) Delete(ctx context.Context, path string) error {
+	ok, err := d.client.DeleteBlobIfExists(d.container, path)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil // was a blob and deleted, return
+	}
+
+	// Not a blob, see if path is a virtual container with blobs
+	blobs, err := d.listBlobs(d.container, path)
+	if err != nil {
+		return err
+	}
+
+	for _, b := range blobs {
+		if err = d.client.DeleteBlob(d.container, b); err != nil {
+			return err
+		}
+	}
+
+	if len(blobs) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+	return nil
+}
+
+// URLFor returns a publicly accessible URL for the blob stored at the given
+// path for the specified duration by making use of Azure Storage Shared
+// Access Signatures (SAS).
+// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration
+	expires, ok := options["expiry"]
+	if ok {
+		t, ok := expires.(time.Time)
+		if ok {
+			expiresTime = t
+		}
+	}
+	return d.client.GetBlobSASURI(d.container, path, expiresTime, "r")
+}
+
+// directDescendants finds the direct descendants (blobs or virtual containers)
+// of prefix within the given list of blob paths and returns their full paths.
+// Elements in the blobs list must be prefixed with a "/".
+//
+// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is
+// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"}
+func directDescendants(blobs []string, prefix string) []string {
+	if !strings.HasPrefix(prefix, "/") { // add leading '/'
+		prefix = "/" + prefix
+	}
+	if !strings.HasSuffix(prefix, "/") { // containerify the path
+		prefix += "/"
+	}
+
+	out := make(map[string]bool)
+	for _, b := range blobs {
+		if strings.HasPrefix(b, prefix) {
+			rel := b[len(prefix):]
+			c := strings.Count(rel, "/")
+			if c == 0 {
+				out[b] = true
+			} else {
+				out[prefix+rel[:strings.Index(rel, "/")]] = true
+			}
+		}
+	}
+
+	var keys []string
+	for k := range out {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
+	if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
+		virtPath += "/"
+	}
+
+	out := []string{}
+	marker := ""
+	for {
+		resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
+			Marker: marker,
+			Prefix: virtPath,
+		})
+
+		if err != nil {
+			return out, err
+		}
+
+		for _, b := range resp.Blobs {
+			out = append(out, b.Name)
+		}
+
+		if len(resp.Blobs) == 0 || resp.NextMarker == "" {
+			break
+		}
+		marker = resp.NextMarker
+	}
+	return out, nil
+}
+
+func is404(err error) bool {
+	e, ok := err.(azure.AzureStorageServiceError)
+	return ok && e.StatusCode == http.StatusNotFound
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go
new file mode 100644
index 00000000..4a0661b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go
@@ -0,0 +1,63 @@
+package azure
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/testsuites"
+	. "gopkg.in/check.v1"
"gopkg.in/check.v1" +) + +const ( + envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" + envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" + envContainer = "AZURE_STORAGE_CONTAINER" + envRealm = "AZURE_STORAGE_REALM" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + var ( + accountName string + accountKey string + container string + realm string + ) + + config := []struct { + env string + value *string + }{ + {envAccountName, &accountName}, + {envAccountKey, &accountKey}, + {envContainer, &container}, + {envRealm, &realm}, + } + + missing := []string{} + for _, v := range config { + *v.value = os.Getenv(v.env) + if *v.value == "" { + missing = append(missing, v.env) + } + } + + azureDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(accountName, accountKey, container, realm) + } + + // Skip Azure storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if len(missing) > 0 { + return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) + } + return "" + } + + testsuites.RegisterSuite(azureDriverConstructor, skipCheck) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go new file mode 100644 index 00000000..1c1df899 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go @@ -0,0 +1,24 @@ +package azure + +import ( + "fmt" + "io" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +// azureBlockStorage is adaptor between azure.BlobStorageClient and +// blockStorage interface. 
+type azureBlockStorage struct {
+	azure.BlobStorageClient
+}
+
+func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) {
+	return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1))
+}
+
+func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage {
+	a := azureBlockStorage{}
+	a.BlobStorageClient = b
+	return a
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go
new file mode 100644
index 00000000..7ce47195
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go
@@ -0,0 +1,155 @@
+package azure
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	azure "github.com/Azure/azure-sdk-for-go/storage"
+)
+
+type StorageSimulator struct {
+	blobs map[string]*BlockBlob
+}
+
+type BlockBlob struct {
+	blocks    map[string]*DataBlock
+	blockList []string
+}
+
+type DataBlock struct {
+	data      []byte
+	committed bool
+}
+
+func (s *StorageSimulator) path(container, blob string) string {
+	return fmt.Sprintf("%s/%s", container, blob)
+}
+
+func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) {
+	_, ok := s.blobs[s.path(container, blob)]
+	return ok, nil
+}
+
+func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) {
+	bb, ok := s.blobs[s.path(container, blob)]
+	if !ok {
+		return nil, fmt.Errorf("blob not found")
+	}
+
+	var readers []io.Reader
+	for _, bID := range bb.blockList {
+		readers = append(readers, bytes.NewReader(bb.blocks[bID].data))
+	}
+	return ioutil.NopCloser(io.MultiReader(readers...)), nil
+}
+
+func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) {
+	r, err := s.GetBlob(container, blob)
+	if err != nil {
+		return nil, err
+	}
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil
+}
+
+func (s *StorageSimulator) CreateBlockBlob(container, blob string) error {
+	path := s.path(container, blob)
+	bb := &BlockBlob{
+		blocks:    make(map[string]*DataBlock),
+		blockList: []string{},
+	}
+	s.blobs[path] = bb
+	return nil
+}
+
+func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error {
+	path := s.path(container, blob)
+	bb, ok := s.blobs[path]
+	if !ok {
+		return fmt.Errorf("blob not found")
+	}
+	data := make([]byte, len(chunk))
+	copy(data, chunk)
+	bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob
+	return nil
+}
+
+func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) {
+	resp := azure.BlockListResponse{}
+	bb, ok := s.blobs[s.path(container, blob)]
+	if !ok {
+		return resp, fmt.Errorf("blob not found")
+	}
+
+	// Iterate committed blocks (in order)
+	if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted {
+		for _, blockID := range bb.blockList {
+			b := bb.blocks[blockID]
+			block := azure.BlockResponse{
+				Name: blockID,
+				Size: int64(len(b.data)),
+			}
+			resp.CommittedBlocks = append(resp.CommittedBlocks, block)
+		}
+	}
+
+	// Iterate uncommitted blocks (in no order)
+	if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeUncommitted {
+		for blockID, b := range bb.blocks {
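+			// map iteration order is random, which matches the
+			// "in no order" contract stated above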
+ block := azure.BlockResponse{ + Name: blockID, + Size: int64(len(b.data)), + } + if !b.committed { + resp.UncommittedBlocks = append(resp.UncommittedBlocks, block) + } + } + } + return resp, nil +} + +func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error { + bb, ok := s.blobs[s.path(container, blob)] + if !ok { + return fmt.Errorf("blob not found") + } + + var blockIDs []string + for _, v := range blocks { + bl, ok := bb.blocks[v.ID] + if !ok { // check if block ID exists + return fmt.Errorf("Block id '%s' not found", v.ID) + } + bl.committed = true + blockIDs = append(blockIDs, v.ID) + } + + // Mark all other blocks uncommitted + for k, b := range bb.blocks { + inList := false + for _, v := range blockIDs { + if k == v { + inList = true + break + } + } + if !inList { + b.committed = false + } + } + + bb.blockList = blockIDs + return nil +} + +func NewStorageSimulator() StorageSimulator { + return StorageSimulator{ + blobs: make(map[string]*BlockBlob), + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go new file mode 100644 index 00000000..776c7cd5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go @@ -0,0 +1,60 @@ +package azure + +import ( + "encoding/base64" + "fmt" + "math/rand" + "sync" + "time" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +type blockIDGenerator struct { + pool map[string]bool + r *rand.Rand + m sync.Mutex +} + +// Generate returns an unused random block id and adds the generated ID +// to list of used IDs so that the same block name is not used again. +func (b *blockIDGenerator) Generate() string { + b.m.Lock() + defer b.m.Unlock() + + var id string + for { + id = toBlockID(int(b.r.Int())) + if !b.exists(id) { + break + } + } + b.pool[id] = true + return id +} + +func (b *blockIDGenerator) exists(id string) bool { + _, used := b.pool[id] + return used +} + +func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { + b.m.Lock() + defer b.m.Unlock() + + for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { + b.pool[bl.Name] = true + } +} + +func newBlockIDGenerator() *blockIDGenerator { + return &blockIDGenerator{ + pool: make(map[string]bool), + r: rand.New(rand.NewSource(time.Now().UnixNano()))} +} + +// toBlockId converts given integer to base64-encoded block ID of a fixed length. 
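+// Azure requires all block IDs within a blob to have the same length, hence
+// the zero padding before encoding.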
+func toBlockID(i int) string { + s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs + return base64.StdEncoding.EncodeToString([]byte(s)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go new file mode 100644 index 00000000..aab70202 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go @@ -0,0 +1,74 @@ +package azure + +import ( + "math" + "testing" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +func Test_blockIdGenerator(t *testing.T) { + r := newBlockIDGenerator() + + for i := 1; i <= 10; i++ { + if expected := i - 1; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + if id := r.Generate(); id == "" { + t.Fatal("returned empty id") + } + if expected := i; len(r.pool) != expected { + t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) + } + } +} + +func Test_blockIdGenerator_Feed(t *testing.T) { + r := newBlockIDGenerator() + if expected := 0; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + + // feed empty list + blocks := azure.BlockListResponse{} + r.Feed(blocks) + if expected := 0; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + + // feed blocks + blocks = azure.BlockListResponse{ + CommittedBlocks: []azure.BlockResponse{ + {"1", 1}, + {"2", 2}, + }, + UncommittedBlocks: []azure.BlockResponse{ + {"3", 3}, + }} + r.Feed(blocks) + if expected := 3; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + + // feed same block IDs with committed/uncommitted place changed + blocks = azure.BlockListResponse{ + CommittedBlocks: []azure.BlockResponse{ + {"3", 3}, + }, + UncommittedBlocks: []azure.BlockResponse{ + {"1", 1}, + }} + r.Feed(blocks) + if expected := 3; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } +} + +func Test_toBlockId(t *testing.T) { + min := 0 + max := math.MaxInt64 + + if len(toBlockID(min)) != len(toBlockID(max)) { + t.Fatalf("different-sized blockIDs are returned") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go new file mode 100644 index 00000000..f18692d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go @@ -0,0 +1,208 @@ +package azure + +import ( + "fmt" + "io" + "io/ioutil" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +// blockStorage is the interface required from a block storage service +// client implementation +type blockStorage interface { + CreateBlockBlob(container, blob string) error + GetBlob(container, blob string) (io.ReadCloser, error) + GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) + PutBlock(container, blob, blockID string, chunk []byte) error + GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) + PutBlockList(container, blob string, blocks []azure.Block) error +} + +// 
randomBlobWriter enables random access semantics on Azure block blobs
+// by allowing chunks of arbitrary length to be written at arbitrary offsets
+// within the blob. Azure Blob Storage does not normally support random
+// access semantics on block blobs; however, this writer can download, split
+// and re-upload the overlapping blocks, discarding those that are being
+// overwritten entirely.
+type randomBlobWriter struct {
+	bs        blockStorage
+	blockSize int
+}
+
+func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter {
+	return randomBlobWriter{bs: bs, blockSize: blockSize}
+}
+
+// WriteBlobAt writes the given chunk to the specified position of an existing blob.
+// The offset must be less than or equal to the current size of the blob.
+func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) {
+	rand := newBlockIDGenerator()
+
+	blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted)
+	if err != nil {
+		return 0, err
+	}
+	rand.Feed(blocks) // load existing block IDs
+
+	// Check for write offset for existing blob
+	size := getBlobSize(blocks)
+	if offset < 0 || offset > size {
+		return 0, fmt.Errorf("wrong offset for Write: %v", offset)
+	}
+
+	// Upload the new chunk as blocks
+	blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand)
+	if err != nil {
+		return 0, err
+	}
+
+	// For non-append operations, existing blocks may need to be split
+	if offset != size {
+		// Split the block on the left end (if any)
+		leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand)
+		if err != nil {
+			return 0, err
+		}
+		blockList = append(leftBlocks, blockList...)
+
+		// Split the block on the right end (if any)
+		rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand)
+		if err != nil {
+			return 0, err
+		}
+		blockList = append(blockList, rightBlocks...)
+	} else {
+		// Use existing block list
+		var existingBlocks []azure.Block
+		for _, v := range blocks.CommittedBlocks {
+			existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
+		}
+		blockList = append(existingBlocks, blockList...)
+	}
+	// Put block list
+	return nn, r.bs.PutBlockList(container, blob, blockList)
+}
+
+func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) {
+	blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted)
+	if err != nil {
+		return 0, err
+	}
+	return getBlobSize(blocks), nil
+}
+
+// writeChunkToBlocks writes the given chunk to one or more blocks within the
+// specified blob and returns their block representations. Those blocks are
+// not committed yet.
+func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) {
+	var newBlocks []azure.Block
+	var nn int64
+
+	// Read chunks of at most size N except the last chunk to
+	// maximize block size and minimize block count.
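+	// io.ReadFull returns io.EOF only when no bytes were read, so a short
+	// final chunk is still uploaded before the loop terminates.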
+	buf := make([]byte, r.blockSize)
+	for {
+		n, err := io.ReadFull(chunk, buf)
+		if err == io.EOF {
+			break
+		}
+		nn += int64(n)
+		data := buf[:n]
+		blockID := rand.Generate()
+		if err := r.bs.PutBlock(container, blob, blockID, data); err != nil {
+			return newBlocks, nn, err
+		}
+		newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted})
+	}
+	return newBlocks, nn, nil
+}
+
+// blocksLeftSide returns the blocks that are going to be at the left side of
+// the writeOffset: [0, writeOffset) by identifying blocks that will remain
+// the same and splitting blocks and reuploading them as needed.
+func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) {
+	var left []azure.Block
+	bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll)
+	if err != nil {
+		return left, err
+	}
+
+	o := writeOffset
+	elapsed := int64(0)
+	for _, v := range bx.CommittedBlocks {
+		blkSize := int64(v.Size)
+		if o >= blkSize { // use existing block
+			left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
+			o -= blkSize
+			elapsed += blkSize
+		} else if o > 0 { // current block needs to be split
+			start := elapsed
+			size := o
+			part, err := r.bs.GetSectionReader(container, blob, start, size)
+			if err != nil {
+				return left, err
+			}
+			newBlockID := rand.Generate()
+
+			data, err := ioutil.ReadAll(part)
+			if err != nil {
+				return left, err
+			}
+			if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
+				return left, err
+			}
+			left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
+			break
+		}
+	}
+	return left, nil
+}
+
+// blocksRightSide returns the blocks that are going to be at the right side of
+// the written chunk: [writeOffset+size, +inf) by identifying blocks that will
+// remain the same and splitting blocks and reuploading them as needed.
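+// At most one block needs re-uploading: the block straddling the right edge
+// of the written chunk. Blocks entirely to its right are reused as-is.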
+func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) {
+	var right []azure.Block
+
+	bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll)
+	if err != nil {
+		return nil, err
+	}
+
+	re := writeOffset + chunkSize - 1 // right end of written chunk
+	var elapsed int64
+	for _, v := range bx.CommittedBlocks {
+		var (
+			bs = elapsed                     // left end of current block
+			be = elapsed + int64(v.Size) - 1 // right end of current block
+		)
+
+		if bs > re { // take the block as is
+			right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
+		} else if be > re { // current block needs to be split
+			part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1)
+			if err != nil {
+				return right, err
+			}
+			newBlockID := rand.Generate()
+
+			data, err := ioutil.ReadAll(part)
+			if err != nil {
+				return right, err
+			}
+			if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
+				return right, err
+			}
+			right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
+		}
+		elapsed += int64(v.Size)
+	}
+	return right, nil
+}
+
+func getBlobSize(blocks azure.BlockListResponse) int64 {
+	var n int64
+	for _, v := range blocks.CommittedBlocks {
+		n += int64(v.Size)
+	}
+	return n
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
new file mode 100644
index 00000000..32c2509e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
@@ -0,0 +1,339 @@
+package azure
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"reflect"
+	"strings"
+	"testing"
+
+	azure "github.com/Azure/azure-sdk-for-go/storage"
+)
+
+func TestRandomWriter_writeChunkToBlocks(t *testing.T) {
+	s := NewStorageSimulator()
+	rw := newRandomBlobWriter(&s, 3)
+	rand := newBlockIDGenerator()
+	c := []byte("AAABBBCCCD")
+
+	if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	}
+	bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if expected := int64(len(c)); nn != expected {
+		t.Fatalf("wrong nn:%v, expected:%v", nn, expected)
+	}
+	if expected := 4; len(bw) != expected {
+		t.Fatal("unexpected written block count")
+	}
+
+	bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if expected := 0; len(bx.CommittedBlocks) != expected {
+		t.Fatal("unexpected committed block count")
+	}
+	if expected := 4; len(bx.UncommittedBlocks) != expected {
+		t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx)
+	}
+
+	if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
+		t.Fatal(err)
+	}
+
+	r, err := rw.bs.GetBlob("a", "b")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertBlobContents(t, r, c)
+}
+
+func TestRandomWriter_blocksLeftSide(t *testing.T) {
+	blob := "AAAAABBBBBCCC"
+	cases := []struct {
+		offset          int64
+		expectedBlob    string
+		expectedPattern []azure.BlockStatus
+	}{
+		{0, "", []azure.BlockStatus{}}, // write to beginning, discard all
+		{13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change
+		{1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1
+		{5, "AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block
+		{6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block
+		{9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block near its end
+	}
+
+	for _, c := range cases {
+		s := NewStorageSimulator()
+		rw := newRandomBlobWriter(&s, 5)
+		rand := newBlockIDGenerator()
+
+		if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
+			t.Fatal(err)
+		}
+		bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
+			t.Fatal(err)
+		}
+		bx, err := rw.blocksLeftSide("a", "b", c.offset, rand)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		bs := []azure.BlockStatus{}
+		for _, v := range bx {
+			bs = append(bs, v.Status)
+		}
+
+		if !reflect.DeepEqual(bs, c.expectedPattern) {
+			t.Logf("Committed blocks %v", bw)
+			t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx)
+		}
+		if err := rw.bs.PutBlockList("a", "b", bx); err != nil {
+			t.Fatal(err)
+		}
+		r, err := rw.bs.GetBlob("a", "b")
+		if err != nil {
+			t.Fatal(err)
+		}
+		cout, err := ioutil.ReadAll(r)
+		if err != nil {
+			t.Fatal(err)
+		}
+		outBlob := string(cout)
+		if outBlob != c.expectedBlob {
+			t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob)
+		}
+	}
+}
+
+func TestRandomWriter_blocksRightSide(t *testing.T) {
+	blob := "AAAAABBBBBCCC"
+	cases := []struct {
+		offset          int64
+		size            int64
+		expectedBlob    string
+		expectedPattern []azure.BlockStatus
+	}{
+		{0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob
+		{0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block
+		{4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block
+		{1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains
+		{3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block
+		{10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite first byte of rightmost block
+		{11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index
+		{13, 20, "", []azure.BlockStatus{}}, // append to the end
+	}
+
+	for _, c := range cases {
+		s := NewStorageSimulator()
+		rw := newRandomBlobWriter(&s, 5)
+		rand := newBlockIDGenerator()
+
+		if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
+			t.Fatal(err)
+		}
+		bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
+			t.Fatal(err)
+		}
+		bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		bs := []azure.BlockStatus{}
+		for _, v := range bx {
+			bs = append(bs, v.Status)
+		}
+
+		if !reflect.DeepEqual(bs, c.expectedPattern) {
+			t.Logf("Committed blocks %v", bw)
+			t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx)
+		}
+		if err := rw.bs.PutBlockList("a", "b", bx); err != nil {
+			t.Fatal(err)
+		}
+		r, err := rw.bs.GetBlob("a", "b")
+		if err != nil {
+			t.Fatal(err)
+		}
+		cout, err := ioutil.ReadAll(r)
+ if err != nil { + t.Fatal(err) + } + outBlob := string(cout) + if outBlob != c.expectedBlob { + t.Fatalf("For offset %v-size:%v: wrong blob contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) + } + } +} + +func TestRandomWriter_Write_NewBlob(t *testing.T) { + var ( + s = NewStorageSimulator() + rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks + blob = randomContents(1024 * 7) // 7 KB blob + ) + if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + + if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { + t.Fatal("expected error, got nil") + } + if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { + t.Fatal("expected error, got nil") + } + if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { + t.Fatal(err) + } else if expected := int64(len(blob)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if len(bx.CommittedBlocks) != 3 { + t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) + } + + // Replace first 512 bytes + leftChunk := randomContents(512) + blob = append(leftChunk, blob[512:]...) + if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(leftChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 4; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) + } + + // Replace last 512 bytes with 1024 bytes + rightChunk := randomContents(1024) + offset := int64(len(blob) - 512) + blob = append(blob[:offset], rightChunk...) + if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(rightChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 5; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) + } + + // Replace 2K-4K (overlaps 2 blocks from L/R) + newChunk := randomContents(1024 * 2) + offset = 1024 * 2 + blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) 
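+	// The inner append overwrites blob[offset:offset+len(newChunk)] in place;
+	// the tail slice read by the outer append lies past that range, so the
+	// expected contents are assembled without aliasing problems.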
+ if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(newChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 6; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) + } + + // Replace the entire blob + newBlob := randomContents(1024 * 30) + if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil { + t.Fatal(err) + } else if expected := int64(len(newBlob)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, newBlob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 10; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) + } else if expected, size := int64(1024*30), getBlobSize(bx); size != expected { + t.Fatalf("committed block size does not indicate blob size") + } +} + +func Test_getBlobSize(t *testing.T) { + // with some committed blocks + if expected, size := int64(151), getBlobSize(azure.BlockListResponse{ + CommittedBlocks: []azure.BlockResponse{ + {"A", 100}, + {"B", 50}, + {"C", 1}, + }, + UncommittedBlocks: []azure.BlockResponse{ + {"D", 200}, + }}); expected != size { + t.Fatalf("wrong blob size: %v, expected: %v", size, expected) + } + + // with no committed blocks + if expected, size := int64(0), getBlobSize(azure.BlockListResponse{ + UncommittedBlocks: []azure.BlockResponse{ + {"A", 100}, + {"B", 50}, + {"C", 1}, + {"D", 200}, + }}); expected != size { + t.Fatalf("wrong blob size: %v, expected: %v", size, expected) + } +} + +func assertBlobContents(t *testing.T, r io.Reader, expected []byte) { + out, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(out, expected) { + t.Fatalf("wrong blob contents. size: %v, expected: %v", len(out), len(expected)) + } +} + +func randomContents(length int64) []byte { + b := make([]byte, length) + for i := range b { + b[i] = byte(rand.Intn(2 << 8)) + } + return b +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go new file mode 100644 index 00000000..095489d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go @@ -0,0 +1,49 @@ +package azure + +import ( + "bytes" + "io" +) + +type blockBlobWriter interface { + GetSize(container, blob string) (int64, error) + WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) +} + +// zeroFillWriter enables writing to an offset outside a block blob's size +// by offering the chunk to the underlying writer as a contiguous data with +// the gap in between filled with NUL (zero) bytes. 
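+// For example, writing 3 bytes at offset 10 of a 5-byte blob stores the 5
+// existing bytes, then 5 zero bytes, then the 3-byte chunk.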
+type zeroFillWriter struct {
+	blockBlobWriter
+}
+
+func newZeroFillWriter(b blockBlobWriter) zeroFillWriter {
+	w := zeroFillWriter{}
+	w.blockBlobWriter = b
+	return w
+}
+
+// Write writes the given chunk to the specified existing blob even if the
+// offset lies beyond the blob's current size. Any gap is filled with zeros.
+// The returned byte count does not include the zeros written as padding.
+func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) {
+	size, err := z.blockBlobWriter.GetSize(container, blob)
+	if err != nil {
+		return 0, err
+	}
+
+	var reader io.Reader
+	var zeroPadding int64
+	if offset <= size {
+		reader = chunk
+	} else {
+		zeroPadding = offset - size
+		offset = size // adjust offset to be the append index
+		zeros := bytes.NewReader(make([]byte, zeroPadding))
+		reader = io.MultiReader(zeros, chunk)
+	}
+
+	nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader)
+	nn -= zeroPadding
+	return nn, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go
new file mode 100644
index 00000000..49361791
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go
@@ -0,0 +1,126 @@
+package azure
+
+import (
+	"bytes"
+	"testing"
+)
+
+func Test_zeroFillWrite_AppendNoGap(t *testing.T) {
+	s := NewStorageSimulator()
+	bw := newRandomBlobWriter(&s, 1024*1)
+	zw := newZeroFillWriter(&bw)
+	if err := s.CreateBlockBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	}
+
+	firstChunk := randomContents(1024*3 + 512)
+	if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil {
+		t.Fatal(err)
+	} else if expected := int64(len(firstChunk)); expected != nn {
+		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
+	}
+	if out, err := s.GetBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	} else {
+		assertBlobContents(t, out, firstChunk)
+	}
+
+	secondChunk := randomContents(256)
+	if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil {
+		t.Fatal(err)
+	} else if expected := int64(len(secondChunk)); expected != nn {
+		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
+	}
+	if out, err := s.GetBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	} else {
+		assertBlobContents(t, out, append(firstChunk, secondChunk...))
+	}
+
+}
+
+func Test_zeroFillWrite_StartWithGap(t *testing.T) {
+	s := NewStorageSimulator()
+	bw := newRandomBlobWriter(&s, 1024*2)
+	zw := newZeroFillWriter(&bw)
+	if err := s.CreateBlockBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	}
+
+	chunk := randomContents(1024 * 5)
+	padding := int64(1024*2 + 256)
+	if nn, err := zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil {
+		t.Fatal(err)
+	} else if expected := int64(len(chunk)); expected != nn {
+		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
+	}
+	if out, err := s.GetBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	} else {
+		assertBlobContents(t, out, append(make([]byte, padding), chunk...))
+	}
+}
+
+func Test_zeroFillWrite_AppendWithGap(t *testing.T) {
+	s := NewStorageSimulator()
+	bw := newRandomBlobWriter(&s, 1024*2)
+	zw := newZeroFillWriter(&bw)
+	if err := s.CreateBlockBlob("a", "b"); err != nil {
+		t.Fatal(err)
+	}
+
+	firstChunk := randomContents(1024*3 + 512)
+	if _, err := zw.Write("a", "b", 0,
bytes.NewReader(firstChunk)); err != nil { + t.Fatal(err) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, firstChunk) + } + + secondChunk := randomContents(256) + padding := int64(1024 * 4) + if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(secondChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...)) + } +} + +func Test_zeroFillWrite_LiesWithinSize(t *testing.T) { + s := NewStorageSimulator() + bw := newRandomBlobWriter(&s, 1024*2) + zw := newZeroFillWriter(&bw) + if err := s.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + + firstChunk := randomContents(1024 * 3) + if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { + t.Fatal(err) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, firstChunk) + } + + // in this case, zerofill won't be used + secondChunk := randomContents(256) + if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(secondChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go new file mode 100644 index 00000000..60af06b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -0,0 +1,169 @@ +// Package base provides a base implementation of the storage driver that can +// be used to implement common checks. The goal is to increase the amount of +// code sharing. +// +// The canonical approach to use this class is to embed in the exported driver +// struct such that calls are proxied through this implementation. First, +// declare the internal driver, as follows: +// +// type driver struct { ... internal ...} +// +// The resulting type should implement StorageDriver such that it can be the +// target of a Base struct. The exported type can then be declared as follows: +// +// type Driver struct { +// Base +// } +// +// Because Driver embeds Base, it effectively implements Base. If the driver +// needs to intercept a call, before going to base, Driver should implement +// that method. Effectively, Driver can intercept calls before coming in and +// driver implements the actual logic. +// +// To further shield the embed from other packages, it is recommended to +// employ a private embed struct: +// +// type baseEmbed struct { +// base.Base +// } +// +// Then, declare driver to embed baseEmbed, rather than Base directly: +// +// type Driver struct { +// baseEmbed +// } +// +// The type now implements StorageDriver, proxying through Base, without +// exporting an unnecessary field. 
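+//
+// The wrappers below validate paths against storagedriver.PathRegexp and
+// check offsets for non-negativity before delegating to the wrapped driver.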
+package base + +import ( + "io" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// Base provides a wrapper around a storagedriver implementation that provides +// common path and bounds checking. +type Base struct { + storagedriver.StorageDriver +} + +// GetContent wraps GetContent of underlying storage driver. +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.GetContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.GetContent(ctx, path) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.PutContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.PutContent(ctx, path, content) +} + +// ReadStream wraps ReadStream of underlying storage driver. +func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.ReadStream(ctx, path, offset) +} + +// WriteStream wraps WriteStream of underlying storage driver. +func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return 0, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.WriteStream(ctx, path, offset, reader) +} + +// Stat wraps Stat of underlying storage driver. +func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Stat(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.Stat(ctx, path) +} + +// List wraps List of underlying storage driver. +func (base *Base) List(ctx context.Context, path string) ([]string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.List(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.List(ctx, path) +} + +// Move wraps Move of underlying storage driver. 
+func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error {
+	ctx, done := context.WithTrace(ctx)
+	defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath)
+
+	if !storagedriver.PathRegexp.MatchString(sourcePath) {
+		return storagedriver.InvalidPathError{Path: sourcePath}
+	} else if !storagedriver.PathRegexp.MatchString(destPath) {
+		return storagedriver.InvalidPathError{Path: destPath}
+	}
+
+	return base.StorageDriver.Move(ctx, sourcePath, destPath)
+}
+
+// Delete wraps Delete of underlying storage driver.
+func (base *Base) Delete(ctx context.Context, path string) error {
+	ctx, done := context.WithTrace(ctx)
+	defer done("%s.Delete(%q)", base.Name(), path)
+
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return storagedriver.InvalidPathError{Path: path}
+	}
+
+	return base.StorageDriver.Delete(ctx, path)
+}
+
+// URLFor wraps URLFor of underlying storage driver.
+func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	ctx, done := context.WithTrace(ctx)
+	defer done("%s.URLFor(%q)", base.Name(), path)
+
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return "", storagedriver.InvalidPathError{Path: path}
+	}
+
+	return base.StorageDriver.URLFor(ctx, path, options)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go
new file mode 100644
index 00000000..e84f0026
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go
@@ -0,0 +1,55 @@
+package factory
+
+import (
+	"fmt"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// driverFactories stores an internal mapping between storage driver names and their respective
+// factories
+var driverFactories = make(map[string]StorageDriverFactory)
+
+// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces
+// Storage drivers should call Register() with a factory to make the driver available by name
+type StorageDriverFactory interface {
+	// Create returns a new storagedriver.StorageDriver with the given parameters
+	// Parameters will vary by driver and may be ignored
+	// Each parameter key must only consist of lowercase letters and numbers
+	Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error)
+}
+
+// Register makes a storage driver available by the provided name.
+// If Register is called twice with the same name or if the driver factory is
+// nil, it panics.
+func Register(name string, factory StorageDriverFactory) {
+	if factory == nil {
+		panic("Must not provide nil StorageDriverFactory")
+	}
+	_, registered := driverFactories[name]
+	if registered {
+		panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name))
+	}
+
+	driverFactories[name] = factory
+}
+
+// Create a new storagedriver.StorageDriver with the given name and
+// parameters. To use a driver, the StorageDriverFactory must first be
+// registered with the given name.
+// If no driver is registered under the given name, an
+// InvalidStorageDriverError is returned.
+func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	driverFactory, ok := driverFactories[name]
+	if !ok {
+		return nil, InvalidStorageDriverError{name}
+	}
+	return driverFactory.Create(parameters)
+}
+
+// InvalidStorageDriverError records an attempt to construct an unregistered storage driver
+type InvalidStorageDriverError struct {
+	Name string
+}
+
+func (err InvalidStorageDriverError) Error() string {
+	return fmt.Sprintf("StorageDriver not registered: %s", err.Name)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go
new file mode 100644
index 00000000..e5064029
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go
@@ -0,0 +1,79 @@
+package driver
+
+import "time"
+
+// FileInfo returns information about a given path. Inspired by os.FileInfo,
+// it elides the base name method for a full path instead.
+type FileInfo interface {
+	// Path provides the full path of the target of this file info.
+	Path() string
+
+	// Size returns current length in bytes of the file. The return value can
+	// be used to write to the end of the file at path. The value is
+	// meaningless if IsDir returns true.
+	Size() int64
+
+	// ModTime returns the modification time for the file. For backends that
+	// don't have a modification time, the creation time should be returned.
+	ModTime() time.Time
+
+	// IsDir returns true if the path is a directory.
+	IsDir() bool
+}
+
+// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal,
+// should only be used by storagedriver implementations. They should be moved
+// to a "driver" package, similar to database/sql.
+
+// FileInfoFields provides the exported fields for implementing the FileInfo
+// interface in storagedriver implementations. It should be used with
+// InternalFileInfo.
+type FileInfoFields struct {
+	// Path provides the full path of the target of this file info.
+	Path string
+
+	// Size is current length in bytes of the file. The value of this field
+	// can be used to write to the end of the file at path. The value is
+	// meaningless if IsDir is set to true.
+	Size int64
+
+	// ModTime returns the modification time for the file. For backends that
+	// don't have a modification time, the creation time should be returned.
+	ModTime time.Time
+
+	// IsDir returns true if the path is a directory.
+	IsDir bool
+}
+
+// FileInfoInternal implements the FileInfo interface. This should only be
+// used by storagedriver implementations that don't have a specialized
+// FileInfo type.
+type FileInfoInternal struct {
+	FileInfoFields
+}
+
+var _ FileInfo = FileInfoInternal{}
+var _ FileInfo = &FileInfoInternal{}
+
+// Path provides the full path of the target of this file info.
+func (fi FileInfoInternal) Path() string {
+	return fi.FileInfoFields.Path
+}
+
+// Size returns current length in bytes of the file. The return value can
+// be used to write to the end of the file at path. The value is
+// meaningless if IsDir returns true.
+func (fi FileInfoInternal) Size() int64 {
+	return fi.FileInfoFields.Size
+}
+
+// ModTime returns the modification time for the file. For backends that
+// don't have a modification time, the creation time should be returned.
+func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. +func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go new file mode 100644 index 00000000..d5d8708c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go @@ -0,0 +1,291 @@ +package filesystem + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "filesystem" +const defaultRootDirectory = "/var/lib/registry" + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters), nil +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +func FromParameters(parameters map[string]interface{}) *Driver { + var rootDirectory = defaultRootDirectory + if parameters != nil { + rootDir, ok := parameters["rootdirectory"] + if ok { + rootDirectory = fmt.Sprint(rootDir) + } + } + return New(rootDirectory) +} + +// New constructs a new Driver with a given rootDirectory +func New(rootDirectory string) *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + rootDirectory: rootDirectory, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return os.Truncate(d.fullPath(subPath), int64(len(contents))) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
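+// A seek past the end of the file is detected via the returned seek position
+// and reported as an InvalidOffsetError.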
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < int64(offset) { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +// WriteStream stores the contents of the provided io.Reader at a location +// designated by the given path. +func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { + // TODO(stevvooe): This needs to be a requirement. + // if !path.IsAbs(subPath) { + // return fmt.Errorf("absolute path required: %q", subPath) + // } + + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0755); err != nil { + return 0, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + // TODO(stevvooe): A few missing conditions in storage driver: + // 1. What if the path is already a directory? + // 2. Should number 1 be exposed explicitly in storagedriver? + // 2. Can this path not exist, even if we create above? + return 0, err + } + defer fp.Close() + + nn, err = fp.Seek(offset, os.SEEK_SET) + if err != nil { + return 0, err + } + + if nn != offset { + return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) + } + + return io.Copy(fp, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { + if subPath[len(subPath)-1] != '/' { + subPath += "/" + } + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
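ReadStream's offset contract is what makes resumable transfers possible: the driver seeks before handing back the ReadCloser, and missing files surface as the typed PathNotFoundError rather than a raw os error. A sketch of consuming that contract, again with a hypothetical root:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	d := filesystem.New("/tmp/registry-example") // hypothetical root
	ctx := context.Background()

	if err := d.PutContent(ctx, "/blob", []byte("0123456789")); err != nil {
		panic(err)
	}

	// Resume mid-object: the driver seeks to the offset before returning.
	rc, err := d.ReadStream(ctx, "/blob", 4)
	if err != nil {
		// Missing paths come back as a typed error, not a raw *os.PathError.
		if _, ok := err.(storagedriver.PathNotFoundError); ok {
			fmt.Println("no such path")
			return
		}
		panic(err)
	}
	defer rc.Close()

	tail, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", tail) // 456789
}
```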
+func (d *driver) Delete(ctx context.Context, subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. +func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go new file mode 100644 index 00000000..8b48b431 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go @@ -0,0 +1,26 @@ +package filesystem + +import ( + "io/ioutil" + "os" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
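Note how the fileInfo wrapper pins Size to zero for directories, so callers should branch on IsDir before trusting Size. For instance:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	d := filesystem.New("/tmp/registry-example") // hypothetical root
	ctx := context.Background()

	if err := d.PutContent(ctx, "/dir/file", []byte("abc")); err != nil {
		panic(err)
	}

	for _, p := range []string{"/dir", "/dir/file"} {
		fi, err := d.Stat(ctx, p)
		if err != nil {
			panic(err)
		}
		// Size is only meaningful for regular files; the directory branch
		// of the fileInfo wrapper returns zero.
		if fi.IsDir() {
			fmt.Printf("%s is a directory\n", fi.Path())
		} else {
			fmt.Printf("%s has %d bytes\n", fi.Path(), fi.Size())
		}
	}
}
```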
+func Test(t *testing.T) { TestingT(t) } + +func init() { + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return New(root), nil + }, testsuites.NeverSkip) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go new file mode 100644 index 00000000..2d121e1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go @@ -0,0 +1,262 @@ +package inmemory + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "inmemory" + +func init() { + factory.Register(driverName, &inMemoryDriverFactory{}) +} + +// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. +type inMemoryDriverFactory struct{} + +func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +type driver struct { + root *dir + mutex sync.RWMutex +} + +// baseEmbed allows us to hide the Base embed. +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local map. +// Intended solely for example and testing purposes. +type Driver struct { + baseEmbed // embedded, hidden base driver. +} + +var _ storagedriver.StorageDriver = &Driver{} + +// New constructs a new Driver. +func New() *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + root: &dir{ + common: common{ + p: "/", + mod: time.Now(), + }, + }, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface. + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + return ioutil.ReadAll(rc) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + f, err := d.root.mkfile(p) + if err != nil { + // TODO(stevvooe): Again, we need to clarify when this is not a + // directory in StorageDriver API. + return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
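Because the inmemory driver needs no disk, credentials, or network, it is the natural test double for storage-dependent code in plain go tests, outside the gocheck suites used in this patch. A sketch:

```go
package example

import (
	"testing"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

// TestRoundTrip exercises storage-dependent code against the map-backed
// driver; the whole store lives and dies with the test process.
func TestRoundTrip(t *testing.T) {
	d := inmemory.New()
	ctx := context.Background()

	if err := d.PutContent(ctx, "/k", []byte("v")); err != nil {
		t.Fatal(err)
	}
	got, err := d.GetContent(ctx, "/k")
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != "v" {
		t.Fatalf("got %q, want %q", got, "v")
	}
}
```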
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + path = normalize(path) + found := d.root.find(path) + + if found.path() != path { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil +} + +// WriteStream stores the contents of the provided io.ReadCloser at a location +// designated by the given path. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + d.mutex.Lock() + defer d.mutex.Unlock() + + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := normalize(path) + + f, err := d.root.mkfile(normalized) + if err != nil { + return 0, fmt.Errorf("not a file") + } + + // Unlock while we are reading from the source, in case we are reading + // from the same mfs instance. This can be fixed by a more granular + // locking model. + d.mutex.Unlock() + d.mutex.RLock() // Take the readlock to block other writers. + var buf bytes.Buffer + + nn, err = buf.ReadFrom(reader) + if err != nil { + // TODO(stevvooe): This condition is odd and we may need to clarify: + // we've read nn bytes from reader but have written nothing to the + // backend. What is the correct return value? Really, the caller needs + // to know that the reader has been advanced and reattempting the + // operation is incorrect. + d.mutex.RUnlock() + d.mutex.Lock() + return nn, err + } + + d.mutex.RUnlock() + d.mutex.Lock() + f.WriteAt(buf.Bytes(), offset) + return nn, err +} + +// Stat returns info about the provided path. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + found := d.root.find(path) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... + } + + entries, err := found.(*dir).list(normalized) + + if err != nil { + switch err { + case errNotExists: + return nil, storagedriver.PathNotFoundError{Path: path} + case errIsNotDir: + return nil, fmt.Errorf("not a directory") + default: + return nil, err + } + } + + return entries, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. 
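WriteStream's offset parameter is what chunked uploads lean on: each call picks up at the position the previous one reached. A round trip against the inmemory driver:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	d := inmemory.New()
	ctx := context.Background()

	// Two writes, the second resuming at the offset the first one reached,
	// the way a chunked upload would.
	if _, err := d.WriteStream(ctx, "/upload", 0, bytes.NewReader([]byte("hello "))); err != nil {
		panic(err)
	}
	if _, err := d.WriteStream(ctx, "/upload", 6, bytes.NewReader([]byte("world"))); err != nil {
		panic(err)
	}

	rc, err := d.ReadStream(ctx, "/upload", 0)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // hello world
}
```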
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) + + err := d.root.move(normalizedSrc, normalizedDst) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: destPath} + default: + return err + } +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + err := d.root.delete(normalized) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: path} + default: + return err + } +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go new file mode 100644 index 00000000..dbc1916f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go @@ -0,0 +1,19 @@ +package inmemory + +import ( + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +func init() { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(), nil + } + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go new file mode 100644 index 00000000..cdefacfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go @@ -0,0 +1,338 @@ +package inmemory + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +var ( + errExists = fmt.Errorf("exists") + errNotExists = fmt.Errorf("notexists") + errIsNotDir = fmt.Errorf("notdir") + errIsDir = fmt.Errorf("isdir") +) + +type node interface { + name() string + path() string + isdir() bool + modtime() time.Time +} + +// dir is the central type for the memory-based storagedriver. All operations +// are dispatched from a root dir. +type dir struct { + common + + // TODO(stevvooe): Use sorted slice + search. + children map[string]node +} + +var _ node = &dir{} + +func (d *dir) isdir() bool { + return true +} + +// add places the node n into dir d. +func (d *dir) add(n node) { + if d.children == nil { + d.children = make(map[string]node) + } + + d.children[n.name()] = n + d.mod = time.Now() +} + +// find searches for the node, given path q in dir. If the node is found, it +// will be returned. If the node is not found, the closet existing parent. If +// the node is found, the returned (node).path() will match q. 
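Move and Delete translate the tree's package-local sentinels (errNotExists and friends, defined just above) into the typed errors the StorageDriver API promises. The same boundary-translation pattern in a standalone sketch, with a plain map standing in for the tree:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel errors let the internal store stay oblivious to the driver API;
// the driver layer translates them at its boundary, as inmemory's Move and
// Delete do with errNotExists.
var errNotExists = errors.New("notexists")

type PathNotFoundError struct{ Path string }

func (e PathNotFoundError) Error() string { return "path not found: " + e.Path }

func remove(store map[string][]byte, p string) error {
	if _, ok := store[p]; !ok {
		return errNotExists
	}
	delete(store, p)
	return nil
}

func driverDelete(store map[string][]byte, p string) error {
	switch err := remove(store, p); err {
	case errNotExists:
		return PathNotFoundError{Path: p}
	default:
		return err
	}
}

func main() {
	store := map[string][]byte{}
	fmt.Println(driverDelete(store, "/missing")) // path not found: /missing
}
```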
+func (d *dir) find(q string) node { + q = strings.Trim(q, "/") + i := strings.Index(q, "/") + + if q == "" { + return d + } + + if i == 0 { + panic("shouldn't happen, no root paths") + } + + var component string + if i < 0 { + // No more path components + component = q + } else { + component = q[:i] + } + + child, ok := d.children[component] + if !ok { + // Node was not found. Return p and the current node. + return d + } + + if child.isdir() { + // traverse down! + q = q[i+1:] + return child.(*dir).find(q) + } + + return child +} + +func (d *dir) list(p string) ([]string, error) { + n := d.find(p) + + if n.path() != p { + return nil, errNotExists + } + + if !n.isdir() { + return nil, errIsNotDir + } + + var children []string + for _, child := range n.(*dir).children { + children = append(children, child.path()) + } + + sort.Strings(children) + return children, nil +} + +// mkfile or return the existing one. returns an error if it exists and is a +// directory. Essentially, this is open or create. +func (d *dir) mkfile(p string) (*file, error) { + n := d.find(p) + if n.path() == p { + if n.isdir() { + return nil, errIsDir + } + + return n.(*file), nil + } + + dirpath, filename := path.Split(p) + // Make any non-existent directories + n, err := d.mkdirs(dirpath) + if err != nil { + return nil, err + } + + dd := n.(*dir) + n = &file{ + common: common{ + p: path.Join(dd.path(), filename), + mod: time.Now(), + }, + } + + dd.add(n) + return n.(*file), nil +} + +// mkdirs creates any missing directory entries in p and returns the result. +func (d *dir) mkdirs(p string) (*dir, error) { + p = normalize(p) + + n := d.find(p) + + if !n.isdir() { + // Found something there + return nil, errIsNotDir + } + + if n.path() == p { + return n.(*dir), nil + } + + dd := n.(*dir) + + relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") + + if relative == "" { + return dd, nil + } + + components := strings.Split(relative, "/") + for _, component := range components { + d, err := dd.mkdir(component) + + if err != nil { + // This should actually never happen, since there are no children. + return nil, err + } + dd = d + } + + return dd, nil +} + +// mkdir creates a child directory under d with the given name. +func (d *dir) mkdir(name string) (*dir, error) { + if name == "" { + return nil, fmt.Errorf("invalid dirname") + } + + _, ok := d.children[name] + if ok { + return nil, errExists + } + + child := &dir{ + common: common{ + p: path.Join(d.path(), name), + mod: time.Now(), + }, + } + d.add(child) + d.mod = time.Now() + + return child, nil +} + +func (d *dir) move(src, dst string) error { + dstDirname, _ := path.Split(dst) + + dp, err := d.mkdirs(dstDirname) + if err != nil { + return err + } + + srcDirname, srcFilename := path.Split(src) + sp := d.find(srcDirname) + + if normalize(srcDirname) != normalize(sp.path()) { + return errNotExists + } + + spd, ok := sp.(*dir) + if !ok { + return errIsNotDir // paranoid. 
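mkfile and mkdirs are mostly string surgery: peel the file name off the full key, find the deepest existing parent, then create the remaining components one at a time. The sketch below shows just that path arithmetic, assuming "/a" is the deepest node that already exists:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	p := "/a/b/c/blob"

	// mkfile first splits the key into its directory chain and file name...
	dirpath, filename := path.Split(p)
	fmt.Println(dirpath, filename) // /a/b/c/ blob

	// ...then mkdirs trims the deepest existing parent (assume "/a" here)
	// and walks the remaining components, creating each in turn.
	existing := "/a"
	relative := strings.Trim(strings.TrimPrefix(dirpath, existing), "/")
	fmt.Println(strings.Split(relative, "/")) // [b c]
}
```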
+ } + + s, ok := spd.children[srcFilename] + if !ok { + return errNotExists + } + + delete(spd.children, srcFilename) + + switch n := s.(type) { + case *dir: + n.p = dst + case *file: + n.p = dst + } + + dp.add(s) + + return nil +} + +func (d *dir) delete(p string) error { + dirname, filename := path.Split(p) + parent := d.find(dirname) + + if normalize(dirname) != normalize(parent.path()) { + return errNotExists + } + + if _, ok := parent.(*dir).children[filename]; !ok { + return errNotExists + } + + delete(parent.(*dir).children, filename) + return nil +} + +// dump outputs a primitive directory structure to stdout. +func (d *dir) dump(indent string) { + fmt.Println(indent, d.name()+"/") + + for _, child := range d.children { + if child.isdir() { + child.(*dir).dump(indent + "\t") + } else { + fmt.Println(indent, child.name()) + } + + } +} + +func (d *dir) String() string { + return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) +} + +// file stores actual data in the fs tree. It acts like an open, seekable file +// where operations are conducted through ReadAt and WriteAt. Use it with +// SectionReader for the best effect. +type file struct { + common + data []byte +} + +var _ node = &file{} + +func (f *file) isdir() bool { + return false +} + +func (f *file) truncate() { + f.data = f.data[:0] +} + +func (f *file) sectionReader(offset int64) io.Reader { + return io.NewSectionReader(f, offset, int64(len(f.data))-offset) +} + +func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { + return copy(p, f.data[offset:]), nil +} + +func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { + off := int(offset) + if cap(f.data) < off+len(p) { + data := make([]byte, len(f.data), off+len(p)) + copy(data, f.data) + f.data = data + } + + f.mod = time.Now() + f.data = f.data[:off+len(p)] + + return copy(f.data[off:off+len(p)], p), nil +} + +func (f *file) String() string { + return fmt.Sprintf("&file{path: %q}", f.p) +} + +// common provides shared fields and methods for node implementations. +type common struct { + p string + mod time.Time +} + +func (c *common) name() string { + _, name := path.Split(c.p) + return name +} + +func (c *common) path() string { + return c.p +} + +func (c *common) modtime() time.Time { + return c.mod +} + +func normalize(p string) string { + return "/" + strings.Trim(p, "/") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go new file mode 100644 index 00000000..31c00afc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go @@ -0,0 +1,119 @@ +// Package middleware - cloudfront wrapper for storage libs +// N.B. currently only works with S3, not arbitrary sites +// +package middleware + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "time" + + "github.com/AdRoll/goamz/cloudfront" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" +) + +// cloudFrontStorageMiddleware provides an simple implementation of layerHandler that +// constructs temporary signed CloudFront URLs from the storagedriver layer URL, +// then issues HTTP Temporary Redirects to this CloudFront content URL. 
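file.WriteAt grows the backing slice on demand, which is what lets WriteStream land at offsets beyond the current end. One caveat visible in the code above: the unconditional re-slice to off+len(p) shrinks the file when a write falls strictly inside the existing range. The standalone sketch below keeps the growth logic but guards the re-slice so earlier writes do not truncate:

```go
package main

import "fmt"

// writeAt keeps the grow-on-demand behavior of inmemory's file.WriteAt but
// guards the re-slice, unlike the vendored version's unconditional one.
func writeAt(data, p []byte, offset int64) []byte {
	off := int(offset)
	if cap(data) < off+len(p) {
		grown := make([]byte, len(data), off+len(p))
		copy(grown, data)
		data = grown
	}
	if len(data) < off+len(p) {
		data = data[:off+len(p)] // extend; any gap stays zeroed
	}
	copy(data[off:off+len(p)], p)
	return data
}

func main() {
	var data []byte
	data = writeAt(data, []byte("world"), 6) // sparse write: bytes 0-5 are zero
	data = writeAt(data, []byte("hello "), 0)
	fmt.Printf("%q\n", data) // "hello world"
}
```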
+type cloudFrontStorageMiddleware struct { + storagedriver.StorageDriver + cloudfront *cloudfront.CloudFront + duration time.Duration +} + +var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} + +// newCloudFrontLayerHandler constructs and returns a new CloudFront +// LayerHandler implementation. +// Required options: baseurl, privatekey, keypairid +func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + base, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("No baseurl provided") + } + baseURL, ok := base.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + pk, ok := options["privatekey"] + if !ok { + return nil, fmt.Errorf("No privatekey provided") + } + pkPath, ok := pk.(string) + if !ok { + return nil, fmt.Errorf("privatekey must be a string") + } + kpid, ok := options["keypairid"] + if !ok { + return nil, fmt.Errorf("No keypairid provided") + } + keypairID, ok := kpid.(string) + if !ok { + return nil, fmt.Errorf("keypairid must be a string") + } + + pkBytes, err := ioutil.ReadFile(pkPath) + if err != nil { + return nil, fmt.Errorf("Failed to read privatekey file: %s", err) + } + + block, _ := pem.Decode([]byte(pkBytes)) + if block == nil { + return nil, fmt.Errorf("Failed to decode private key as an rsa private key") + } + privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + + cf := cloudfront.New(baseURL, privateKey, keypairID) + + duration := 20 * time.Minute + d, ok := options["duration"] + if ok { + switch d := d.(type) { + case time.Duration: + duration = d + case string: + dur, err := time.ParseDuration(d) + if err != nil { + return nil, fmt.Errorf("Invalid duration: %s", err) + } + duration = dur + } + } + + return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil +} + +// S3BucketKeyer is any type that is capable of returning the S3 bucket key +// which should be cached by AWS CloudFront. +type S3BucketKeyer interface { + S3BucketKey(path string) string +} + +// Resolve returns an http.Handler which can serve the contents of the given +// Layer, or an error if not supported by the storagedriver. +func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + // TODO(endophage): currently only supports S3 + keyer, ok := lh.StorageDriver.(S3BucketKeyer) + if !ok { + context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(ctx, path, options) + } + + cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) + if err != nil { + return "", err + } + return cfURL, nil +} + +// init registers the cloudfront layerHandler backend. 
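The constructor is reached through the middleware registry rather than called directly, so configuration arrives as an untyped options map; note that "duration" may be either a time.Duration or a parseable string. A sketch of the wiring with hypothetical values (the call fails unless the key file actually exists and parses):

```go
package main

import (
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
	// Side-effect import runs the cloudfront init(), registering the
	// middleware under the name "cloudfront".
	_ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"
)

func main() {
	base := inmemory.New() // stand-in; CloudFront signing only pays off over S3

	// baseurl, privatekey, and keypairid are required strings; all values
	// below are hypothetical.
	wrapped, err := storagemiddleware.Get("cloudfront", map[string]interface{}{
		"baseurl":    "https://d111111abcdef8.cloudfront.net",
		"privatekey": "/etc/docker/cloudfront.pem",
		"keypairid":  "APKAEXAMPLE",
		"duration":   "30m",
	}, base)
	if err != nil {
		panic(err) // errors here unless the key file exists and parses
	}
	_ = wrapped
}
```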
+func init() { + storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go new file mode 100644 index 00000000..7e40a8dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go @@ -0,0 +1,39 @@ +package storagemiddleware + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// InitFunc is the type of a StorageMiddleware factory function and is +// used to register the constructor for different StorageMiddleware backends. +type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) + +var storageMiddlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a StorageMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if storageMiddlewares == nil { + storageMiddlewares = make(map[string]InitFunc) + } + if _, exists := storageMiddlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + storageMiddlewares[name] = initFunc + + return nil +} + +// Get constructs a StorageMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { + if storageMiddlewares != nil { + if initFunc, exists := storageMiddlewares[name]; exists { + return initFunc(storageDriver, options) + } + } + + return nil, fmt.Errorf("no storage middleware registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go new file mode 100644 index 00000000..d1bc932f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. +package oss diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go new file mode 100644 index 00000000..cec32026 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -0,0 +1,813 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. 
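Register and Get make middleware pluggable in the same way the factory does for drivers. A hypothetical pass-through middleware, registered and applied, shows the shape an InitFunc has to take:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
)

// loggingDriver is a hypothetical middleware: it embeds the wrapped
// StorageDriver and overrides only the calls it wants to observe.
type loggingDriver struct {
	storagedriver.StorageDriver
}

func (l loggingDriver) GetContent(ctx context.Context, path string) ([]byte, error) {
	fmt.Println("GetContent:", path)
	return l.StorageDriver.GetContent(ctx, path)
}

func init() {
	storagemiddleware.Register("logging", func(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
		return loggingDriver{StorageDriver: sd}, nil
	})
}

func main() {
	d, err := storagemiddleware.Get("logging", nil, inmemory.New())
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	d.PutContent(ctx, "/x", []byte("y"))
	d.GetContent(ctx, "/x") // prints "GetContent: /x"
}
```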
+// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// +build include_oss + +package oss + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/distribution/context" + + "github.com/Sirupsen/logrus" + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyID string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string + Endpoint string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. 
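The 5 << 20 floor comes from OSS itself, which rejects multipart parts under 5 MB; the driver simply doubles it for its default. The shifts are plain MiB arithmetic:

```go
package main

import "fmt"

// 5 << 20 == 5 * 1024 * 1024, i.e. 5 MiB; the driver defaults to twice that.
const (
	minChunkSize     = 5 << 20
	defaultChunkSize = 2 * minChunkSize
)

func main() {
	fmt.Println(minChunkSize, defaultChunkSize) // 5242880 10485760
}
```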
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskeyid"] + if !ok { + return nil, fmt.Errorf("No accesskeyid parameter provided") + } + secretKey, ok := parameters["accesskeysecret"] + if !ok { + return nil, fmt.Errorf("No accesskeysecret parameter provided") + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + internalBool := false + internal, ok := parameters["internal"] + if ok { + internalBool, ok = internal.(bool) + if !ok { + return nil, fmt.Errorf("The internal parameter should be a boolean") + } + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + endpoint, ok := parameters["endpoint"] + if !ok { + endpoint = "" + } + + params := DriverParameters{ + AccessKeyID: fmt.Sprint(accessKey), + AccessKeySecret: fmt.Sprint(secretKey), + Bucket: fmt.Sprint(bucket), + Region: oss.Region(fmt.Sprint(regionName)), + ChunkSize: chunkSize, + RootDirectory: fmt.Sprint(rootDirectory), + Encrypt: encryptBool, + Secure: secureBool, + Internal: internalBool, + Endpoint: fmt.Sprint(endpoint), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) + client.SetEndpoint(params.Endpoint) + bucket := client.Bucket(params.Bucket) + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. 
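The chunksize coercion accepts a string or any common integer type; strconv.ParseInt with base 0 even admits hex and octal literals, since the base is inferred from the prefix. The same switch, extracted into a standalone sketch:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// coerceChunkSize mirrors the parameter handling above: strings are parsed
// with an inferred base, and other integer kinds are converted via reflect.
func coerceChunkSize(v interface{}) (int64, error) {
	var chunkSize int64
	switch v := v.(type) {
	case string:
		vv, err := strconv.ParseInt(v, 0, 64)
		if err != nil {
			return 0, fmt.Errorf("chunksize parameter must be an integer, %v invalid", v)
		}
		chunkSize = vv
	case int64:
		chunkSize = v
	case int, uint, int32, uint32, uint64:
		chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
	default:
		return 0, fmt.Errorf("invalid value for chunksize: %#v", v)
	}
	return chunkSize, nil
}

func main() {
	for _, v := range []interface{}{"10485760", 10 << 20, "0xA00000"} {
		n, err := coerceChunkSize(v)
		fmt.Println(n, err) // 10485760 <nil>, three times over
	}
}
```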
+ if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new OSS client while another one is running on the same bucket. + + d := &driver{ + Client: client, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.ossPath(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) + if err != nil { + return nil, parseError(path, err) + } + + // Due to Aliyun OSS API, status 200 and whole object will be return instead of an + // InvalidRange error when range is invalid. + // + // OSS sever will always return http.StatusPartialContent if range is acceptable. + if resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + partNumber := 1 + bytesRead := 0 + var putErrChan chan error + parts := []oss.Part{} + var part oss.Part + done := make(chan struct{}) // stopgap to free up waiting goroutines + + multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return 0, err + } + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. 
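The pool and the shared zeros slice keep WriteStream from allocating a fresh multi-megabyte buffer on every call, and putbuf scrubs buffers before recycling them. The same getbuf/putbuf discipline in miniature:

```go
package main

import (
	"fmt"
	"sync"
)

const chunkSize = 5 << 20 // 5 MB, OSS's multipart minimum

var zeros = make([]byte, chunkSize)

var pool = sync.Pool{
	New: func() interface{} { return make([]byte, chunkSize) },
}

func getbuf() []byte { return pool.Get().([]byte) }

func putbuf(p []byte) {
	copy(p, zeros) // scrub before reuse, as the driver does
	pool.Put(p)
}

func main() {
	buf := getbuf()
	copy(buf, "payload")
	putbuf(buf)
	fmt.Println(len(getbuf())) // 5242880; likely the same scrubbed buffer
}
```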
+ defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the OSS + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying OSS library should handle it, it doesn't seem to + // be part of the shouldRetry function (see denverdino/aliyungo/oss). + defer func() { + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } + }() + + if bytesRead <= 0 { + return + } + + var err error + var part oss.Part + + loop: + for retries := 0; retries < 5; retries++ { + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) + if err == nil { + break // success! + } + + // NOTE(stevvooe): This retry code tries to only retry under + // conditions where the OSS package does not. We may add oss + // error codes to the below if we see others bubble up in the + // application. Right now, the most troubling is + // RequestTimeout, which seems to only triggered when a tcp + // connection to OSS slows to a crawl. If the RequestTimeout + // ends up getting added to the OSS library and we don't see + // other errors, this retry loop can be removed. + switch err := err.(type) { + case *oss.Error: + switch err.Code { + case "RequestTimeout": + // allow retries on only this error. + default: + break loop + } + } + + backoff := 100 * time.Millisecond * time.Duration(retries+1) + logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) + time.Sleep(backoff) + } + + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. 
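The retry loop above only re-attempts RequestTimeout failures, sleeping a linearly growing backoff (100ms, 200ms, and so on, five attempts at most) between tries. A standalone sketch of that loop with a stubbed upload:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errRequestTimeout stands in for the oss.Error with Code "RequestTimeout";
// it is the only failure the loop treats as transient.
var errRequestTimeout = errors.New("RequestTimeout")

// putPartWithRetry mirrors the loop above: up to five attempts with a
// linearly growing backoff, bailing immediately on any other error.
func putPartWithRetry(upload func() error) error {
	var err error
	for retries := 0; retries < 5; retries++ {
		if err = upload(); err == nil {
			return nil
		}
		if err != errRequestTimeout {
			return err
		}
		backoff := 100 * time.Millisecond * time.Duration(retries+1)
		fmt.Printf("error putting part, retrying after %v: %v\n", backoff, err)
		time.Sleep(backoff)
	}
	return err
}

func main() {
	attempts := 0
	err := putPartWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errRequestTimeout
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```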
+ parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.ossPath(path), nil) + if err != nil { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Path(d.ossPath(path))) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{}, + d.Bucket.Path(d.ossPath(path))) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if 
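In the large-gap branch, the zero-fill helpers split the distance between the object's current length and the requested offset into whole chunks (fromZeroFillLarge) plus a sub-chunk remainder (fromZeroFillSmall). The arithmetic, with small illustrative numbers in place of real chunk sizes:

```go
package main

import "fmt"

func main() {
	const chunkSize = int64(10)
	current, offset := int64(3), int64(27) // illustrative, not 5 MB chunks

	// fromZeroFillLarge emits whole zero chunks while a full chunk fits;
	// fromZeroFillSmall then pads the sub-chunk remainder.
	gap := offset - current
	fmt.Println(gap/chunkSize, gap%chunkSize) // 2 4
}
```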
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) + if err != nil { + return nil, err + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath)) + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(), + oss.CopyOptions{ + //Options: d.getOptions(), + //ContentType: d.getContentType() + }, + d.Bucket.Path(d.ossPath(sourcePath))) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
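List pages through results listMax keys at a time, feeding NextMarker back in while IsTruncated holds. The same cursor loop against a stubbed listing that returns two keys per page:

```go
package main

import "fmt"

type page struct {
	Keys        []string
	IsTruncated bool
	NextMarker  string
}

// list is a hypothetical stand-in for Bucket.List(prefix, delim, marker, max):
// it returns keys strictly after the marker, at most two per page.
func list(marker string) page {
	all := []string{"a", "b", "c", "d", "e"}
	const max = 2
	start := 0
	for i, k := range all {
		if k > marker {
			start = i
			break
		}
		start = i + 1
	}
	end := start + max
	if end >= len(all) {
		return page{Keys: all[start:]}
	}
	return page{Keys: all[start:end], IsTruncated: true, NextMarker: all[end-1]}
}

func main() {
	var keys []string
	resp := list("")
	for {
		keys = append(keys, resp.Keys...)
		if !resp.IsTruncated {
			break
		}
		resp = list(resp.NextMarker)
	}
	fmt.Println(keys) // [a b c d e]
}
```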
+func (d *driver) Delete(ctx context.Context, path string) error { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + ossObjects := make([]oss.Object, listMax) + + for len(listResponse.Contents) > 0 { + for index, key := range listResponse.Contents { + ossObjects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) + if err != nil { + return nil + } + + listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + logrus.Infof("expiresTime: %d", expiresTime) + + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + logrus.Infof("expiresTime: %d", expiresTime) + testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("testURL: %s", testURL) + return testURL, nil +} + +func (d *driver) ossPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the OSS bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).ossPath(path) +} + +func parseError(path string, err error) error { + if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + ossErr, ok := err.(*oss.Error) + return ok && ossErr.Code == code +} + +func (d *driver) getOptions() oss.Options { + return oss.Options{ServerSideEncryption: d.Encrypt} +} + +func getPermissions() oss.ACL { + return oss.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// getbuf returns a buffer from the driver's pool with length d.ChunkSize. +func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go new file mode 100644 index 00000000..fbae5d9c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go @@ -0,0 +1,144 @@ +// +build include_oss + +package oss + +import ( + "io/ioutil" + + alioss "github.com/denverdino/aliyungo/oss" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + //"log" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
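URLFor accepts two option keys, "method" (GET or HEAD only) and "expiry" (a time.Time), defaulting to a GET link good for twenty minutes; note also that the Delete above swallows DelMulti failures by returning nil. A sketch of the option handling, with buildURL standing in for Bucket.SignedURLWithMethod:

```go
package main

import (
	"fmt"
	"time"
)

// signedURL mirrors the validation shape of the driver's URLFor.
func signedURL(path string, options map[string]interface{}, buildURL func(method, path string, expires time.Time) string) (string, error) {
	methodString := "GET"
	if method, ok := options["method"]; ok {
		m, ok := method.(string)
		if !ok || (m != "GET" && m != "HEAD") {
			return "", fmt.Errorf("unsupported method")
		}
		methodString = m
	}

	expiresTime := time.Now().Add(20 * time.Minute) // the driver's default
	if expiry, ok := options["expiry"]; ok {
		if et, ok := expiry.(time.Time); ok {
			expiresTime = et
		}
	}
	return buildURL(methodString, path, expiresTime), nil
}

func main() {
	u, err := signedURL("/blob", map[string]interface{}{"method": "HEAD"},
		func(m, p string, t time.Time) string {
			// hypothetical URL shape, for illustration only
			return fmt.Sprintf("https://bucket.example%s?method=%s&expires=%d", p, m, t.Unix())
		})
	fmt.Println(u, err)
}
```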
+func Test(t *testing.T) { check.TestingT(t) } + +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go new file mode 100644 index 00000000..655c68a3 --- /dev/null 
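The suite self-gates on environment variables and reports a skip reason instead of failing. The same pattern in plain go-test form, checking just one of the variables:

```go
package example

import (
	"os"
	"testing"
)

// skipCheck reports why the live-bucket tests cannot run, or "" if they can.
func skipCheck() string {
	if os.Getenv("ALIYUN_ACCESS_KEY_ID") == "" {
		return "Must set ALIYUN_ACCESS_KEY_ID to run OSS tests"
	}
	return ""
}

func TestAgainstOSS(t *testing.T) {
	if reason := skipCheck(); reason != "" {
		t.Skip(reason)
	}
	// live assertions against the configured bucket would go here
}
```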
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go
@@ -0,0 +1,3 @@
+// Package rados implements the rados storage driver backend. Support can be
+// enabled by including the "include_rados" build tag.
+package rados
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go
new file mode 100644
index 00000000..0ea10a89
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go
@@ -0,0 +1,630 @@
+// +build include_rados
+
+package rados
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path"
+	"strconv"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+	"github.com/docker/distribution/uuid"
+	"github.com/noahdesu/go-ceph/rados"
+)
+
+const driverName = "rados"
+
+// Prefix for all stored blob objects
+const objectBlobPrefix = "blob:"
+
+// Objects are striped into 4M chunks
+const defaultChunkSize = 4 << 20
+const defaultXattrTotalSizeName = "total-size"
+
+// Max number of keys fetched from omap at each read operation
+const defaultKeysFetched = 1
+
+// DriverParameters encapsulates all of the driver parameters after all values have been set
+type DriverParameters struct {
+	poolname  string
+	username  string
+	chunksize uint64
+}
+
+func init() {
+	factory.Register(driverName, &radosDriverFactory{})
+}
+
+// radosDriverFactory implements the factory.StorageDriverFactory interface
+type radosDriverFactory struct{}
+
+func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+type driver struct {
+	Conn      *rados.Conn
+	Ioctx     *rados.IOContext
+	chunksize uint64
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS.
+// Objects are stored at absolute keys in the provided pool.
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - poolname: the ceph pool name +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + + pool, ok := parameters["poolname"] + if !ok { + return nil, fmt.Errorf("No poolname parameter provided") + } + + username, ok := parameters["username"] + if !ok { + username = "" + } + + chunksize := uint64(defaultChunkSize) + chunksizeParam, ok := parameters["chunksize"] + if ok { + chunksize, ok = chunksizeParam.(uint64) + if !ok { + return nil, fmt.Errorf("The chunksize parameter should be a number") + } + } + + params := DriverParameters{ + fmt.Sprint(pool), + fmt.Sprint(username), + chunksize, + } + + return New(params) +} + +// New constructs a new Driver +func New(params DriverParameters) (*Driver, error) { + var conn *rados.Conn + var err error + + if params.username != "" { + log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) + conn, err = rados.NewConnWithUser(params.username) + } else { + log.Infof("Opening connection to pool %s", params.poolname) + conn, err = rados.NewConn() + } + + if err != nil { + return nil, err + } + + err = conn.ReadDefaultConfigFile() + if err != nil { + return nil, err + } + + err = conn.Connect() + if err != nil { + return nil, err + } + + log.Infof("Connected") + + ioctx, err := conn.OpenIOContext(params.poolname) + + log.Infof("Connected to pool %s", params.poolname) + + if err != nil { + return nil, err + } + + d := &driver{ + Ioctx: ioctx, + Conn: conn, + chunksize: params.chunksize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
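Blobs are striped across fixed-size RADOS objects, so a byte offset resolves to a (chunk object, offset within chunk) pair. getChunkNameFromOffset itself is not part of this hunk; the arithmetic below is the obvious division and is illustrative only, including the assumed name scheme:

```go
package main

import "fmt"

const chunksize = 4 << 20 // the driver's 4 MB default stripe

// chunkNameFromOffset sketches the mapping a striped layout needs: which
// chunk object holds the byte, and where inside that chunk it sits. The
// "<oid>-<n>" naming is an assumption, not taken from the driver.
func chunkNameFromOffset(oid string, offset uint64) (string, uint64) {
	chunkNumber := offset / chunksize
	chunkOffset := offset % chunksize
	return fmt.Sprintf("%s-%d", oid, chunkNumber), chunkOffset
}

func main() {
	name, off := chunkNameFromOffset("blob:1234", 9<<20)
	fmt.Println(name, off) // blob:1234-2 1048576
}
```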
+type readStreamReader struct {
+	driver *driver
+	oid    string
+	size   uint64
+	offset uint64
+}
+
+func (r *readStreamReader) Read(b []byte) (n int, err error) {
+	// Determine the part available to read
+	bufferOffset := uint64(0)
+	bufferSize := uint64(len(b))
+
+	// End of the object, read less than the buffer size
+	if bufferSize > r.size-r.offset {
+		bufferSize = r.size - r.offset
+	}
+
+	// Fill `b`
+	for bufferOffset < bufferSize {
+		// Get the offset in the object chunk
+		chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset)
+
+		// Determine the best size to read
+		bufferEndOffset := bufferSize
+		if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset {
+			bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset)
+		}
+
+		// Read the chunk
+		n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset)
+
+		if err != nil {
+			return int(bufferOffset), err
+		}
+
+		bufferOffset += uint64(n)
+		r.offset += uint64(n)
+	}
+
+	// EOF if the offset is at the end of the object
+	if r.offset == r.size {
+		return int(bufferOffset), io.EOF
+	}
+
+	return int(bufferOffset), nil
+}
+
+func (r *readStreamReader) Close() error {
+	return nil
+}
+
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	// get oid from filename
+	oid, err := d.getOid(path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// get object stat
+	stat, err := d.Stat(ctx, path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if offset > stat.Size() {
+		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
+	}
+
+	return &readStreamReader{
+		driver: d,
+		oid:    oid,
+		size:   uint64(stat.Size()),
+		offset: uint64(offset),
+	}, nil
+}
+
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
+	buf := make([]byte, d.chunksize)
+	totalRead = 0
+
+	oid, err := d.getOid(path)
+	if err != nil {
+		switch err.(type) {
+		// Trying to write new object, generate new blob identifier for it
+		case storagedriver.PathNotFoundError:
+			oid = d.generateOid()
+			err = d.putOid(path, oid)
+			if err != nil {
+				return 0, err
+			}
+		default:
+			return 0, err
+		}
+	} else {
+		// Check total object size only for existing ones
+		totalSize, err := d.getXattrTotalSize(ctx, oid)
+		if err != nil {
+			return 0, err
+		}
+
+		// If offset is after the current object size, fill the gap with zeros
+		for totalSize < uint64(offset) {
+			sizeToWrite := d.chunksize
+			if uint64(offset)-totalSize < sizeToWrite {
+				sizeToWrite = uint64(offset) - totalSize
+			}
+
+			chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize))
+			err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset))
+			if err != nil {
+				return totalRead, err
+			}
+
+			totalSize += sizeToWrite
+		}
+	}
+
+	// Writer
+	for {
+		// Align to chunk size
+		sizeRead := uint64(0)
+		sizeToRead := uint64(offset+totalRead) % d.chunksize
+		if sizeToRead == 0 {
+			sizeToRead = d.chunksize
+		}
+
+		// Read from `reader`
+		for sizeRead < sizeToRead {
+			nn, err := reader.Read(buf[sizeRead:sizeToRead])
+			sizeRead += uint64(nn)
+
+			if err != nil {
+				if err != io.EOF {
+					return totalRead, err
+				}
+
+				break
+			}
+		}
+
+		// End of file and nothing was read
+		if sizeRead == 0 {
+			break
+		}
+
+		// Write chunk object
+		chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead))
+		err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset))
+
+		if err != nil {
+			return totalRead, err
+		}
+ + // Update total object size as xattr in the first chunk of the object + err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) + if err != nil { + return totalRead, err + } + + totalRead += int64(sizeRead) + + // End of file + if sizeRead < sizeToRead { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // get oid from filename + oid, err := d.getOid(path) + + if err != nil { + return nil, err + } + + // the path is a virtual directory? + if oid == "" { + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: 0, + IsDir: true, + }, + }, nil + } + + // stat first chunk + stat, err := d.Ioctx.Stat(oid + "-0") + + if err != nil { + return nil, err + } + + // get total size of chunked object + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(totalSize), + ModTime: stat.ModTime, + }, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { + files, err := d.listDirectoryOid(dirPath) + + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(files)) + for k := range files { + keys = append(keys, path.Join(dirPath, k)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + // Get oid + oid, err := d.getOid(sourcePath) + + if err != nil { + return err + } + + // Move reference + err = d.putOid(destPath, oid) + + if err != nil { + return err + } + + // Delete old reference + err = d.deleteOid(sourcePath) + + if err != nil { + return err + } + + return nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, objectPath string) error { + // Get oid + oid, err := d.getOid(objectPath) + + if err != nil { + return err + } + + // Deleting virtual directory + if oid == "" { + objects, err := d.listDirectoryOid(objectPath) + if err != nil { + return err + } + + for object := range objects { + err = d.Delete(ctx, path.Join(objectPath, object)) + if err != nil { + return err + } + } + } else { + // Delete object chunks + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return err + } + + for offset := uint64(0); offset < totalSize; offset += d.chunksize { + chunkName, _ := d.getChunkNameFromOffset(oid, offset) + + err = d.Ioctx.Delete(chunkName) + if err != nil { + return err + } + } + + // Delete reference + err = d.deleteOid(objectPath) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	return "", storagedriver.ErrUnsupportedMethod
+}
+
+// Generate a blob identifier
+func (d *driver) generateOid() string {
+	return objectBlobPrefix + uuid.Generate().String()
+}
+
+// Reference an object and its hierarchy
+func (d *driver) putOid(objectPath string, oid string) error {
+	directory := path.Dir(objectPath)
+	base := path.Base(objectPath)
+	createParentReference := true
+
+	// After creating this reference, skip the parents referencing since the
+	// hierarchy already exists
+	if oid == "" {
+		firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
+		if (err == nil) && (len(firstReference) > 0) {
+			createParentReference = false
+		}
+	}
+
+	oids := map[string][]byte{
+		base: []byte(oid),
+	}
+
+	// Reference object
+	err := d.Ioctx.SetOmap(directory, oids)
+	if err != nil {
+		return err
+	}
+
+	// Ensure the parent virtual directories exist
+	if createParentReference && directory != "/" {
+		return d.putOid(directory, "")
+	}
+
+	return nil
+}
+
+// Get the object identifier from an object name
+func (d *driver) getOid(objectPath string) (string, error) {
+	directory := path.Dir(objectPath)
+	base := path.Base(objectPath)
+
+	files, err := d.Ioctx.GetOmapValues(directory, "", base, 1)
+
+	if (err != nil) || (files[base] == nil) {
+		return "", storagedriver.PathNotFoundError{Path: objectPath}
+	}
+
+	return string(files[base]), nil
+}
+
+// List the objects of a virtual directory
+func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) {
+	return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched)
+}
+
+// Remove a file from the files hierarchy
+func (d *driver) deleteOid(objectPath string) error {
+	// Remove object reference
+	directory := path.Dir(objectPath)
+	base := path.Base(objectPath)
+	err := d.Ioctx.RmOmapKeys(directory, []string{base})
+
+	if err != nil {
+		return err
+	}
+
+	// Remove virtual directory if empty (no more references)
+	firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
+
+	if err != nil {
+		return err
+	}
+
+	if len(firstReference) == 0 {
+		// Delete omap
+		err := d.Ioctx.Delete(directory)
+
+		if err != nil {
+			return err
+		}
+
+		// Remove reference on parent omaps
+		if directory != "/" {
+			return d.deleteOid(directory)
+		}
+	}
+
+	return nil
+}
+
+// Takes an offset in a chunked object and returns the chunk name and a new
+// offset in this chunk object
+func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) {
+	chunkID := offset / d.chunksize
+	chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10)
+	chunkedOffset := offset % d.chunksize
+	return chunkedOid, chunkedOffset
+}
+
+// Set the total size of a chunked object `oid`
+func (d *driver) setXattrTotalSize(oid string, size uint64) error {
+	// Convert uint64 `size` to []byte
+	xattr := make([]byte, binary.MaxVarintLen64)
+	binary.LittleEndian.PutUint64(xattr, size)
+
+	// Save the total size as a xattr in the first chunk
+	return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
+}
+
+// Get the total size of the chunked object `oid` stored as xattr
+func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) {
+	// Fetch xattr as []byte
+	xattr := make([]byte, binary.MaxVarintLen64)
+	xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
+
+	if err != nil {
+		return 0, err
+	}
+
+	if xattrLength != len(xattr) {
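+		// A short read here means the size xattr is missing or truncated;
+		// the driver reports the object as not found rather than guessing
+		// at its length.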
+		context.GetLogger(ctx).Errorf("object %s xattr length mismatch: %d != %d", oid, xattrLength, len(xattr))
+		return 0, storagedriver.PathNotFoundError{Path: oid}
+	}
+
+	// Convert []byte as uint64
+	totalSize := binary.LittleEndian.Uint64(xattr)
+
+	return totalSize, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go
new file mode 100644
index 00000000..ce367fb5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go
@@ -0,0 +1,40 @@
+// +build include_rados
+
+package rados
+
+import (
+	"os"
+	"testing"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/testsuites"
+
+	"gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+func init() {
+	poolname := os.Getenv("RADOS_POOL")
+	username := os.Getenv("RADOS_USER")
+
+	driverConstructor := func() (storagedriver.StorageDriver, error) {
+		parameters := DriverParameters{
+			poolname,
+			username,
+			defaultChunkSize,
+		}
+
+		return New(parameters)
+	}
+
+	skipCheck := func() string {
+		if poolname == "" {
+			return "RADOS_POOL must be set to run RADOS tests"
+		}
+		return ""
+	}
+
+	testsuites.RegisterSuite(driverConstructor, skipCheck)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go
new file mode 100644
index 00000000..552c221d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go
@@ -0,0 +1,826 @@
+// Package s3 provides a storagedriver.StorageDriver implementation to
+// store blobs in Amazon S3 cloud storage.
+//
+// This package leverages the AdRoll/goamz client library for interfacing with
+// S3.
+//
+// Because S3 is a key-value store, the Stat call does not support last
+// modification time for directories (directories are an abstraction for
+// key-value stores).
+//
+// Keep in mind that S3 guarantees only eventual consistency, so do not assume
+// that a successful write will mean immediate access to the data written
+// (although in most regions a new object put has guaranteed read after write).
+// The only true guarantee is that once you call Stat and receive a certain
+// file size, that much of the file is already accessible.
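+//
+// A consequence of the above (an illustrative sketch only, not part of this
+// driver's API surface) is that callers who need a consistent view should
+// gate reads on Stat:
+//
+//	if fi, err := d.Stat(ctx, p); err == nil {
+//		rc, _ := d.ReadStream(ctx, p, 0)
+//		// the first fi.Size() bytes are guaranteed readable
+//		_ = io.LimitReader(rc, fi.Size())
+//	}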
+package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/AdRoll/goamz/aws" + "github.com/AdRoll/goamz/s3" + "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region aws.Region + Encrypt bool + Secure bool + V4Auth bool + ChunkSize int64 + RootDirectory string +} + +func init() { + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
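+//
+// A minimal construction sketch via the factory (region and bucket are the
+// only strictly required parameters; the values below are illustrative, and
+// credentials may instead come from an EC2 IAM role):
+//
+//	d, err := factory.Create("s3", map[string]interface{}{
+//		"region": "us-east-1",
+//		"bucket": "my-registry-bucket",
+//	})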
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskey"] + if !ok { + accessKey = "" + } + secretKey, ok := parameters["secretkey"] + if !ok { + secretKey = "" + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := aws.GetRegion(fmt.Sprint(regionName)) + if region.Name == "" { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + v4AuthBool := false + v4Auth, ok := parameters["v4auth"] + if ok { + v4AuthBool, ok = v4Auth.(bool) + if !ok { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + encryptBool, + secureBool, + v4AuthBool, + chunkSize, + fmt.Sprint(rootDirectory), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) + if err != nil { + return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) + } + + if !params.Secure { + params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) + } + + s3obj := s3.New(auth, params.Region) + bucket := s3obj.Bucket(params.Bucket) + + if params.V4Auth { + s3obj.Signature = aws.V4Signature + } else { + if params.Region.Name == "eu-central-1" { + return nil, fmt.Errorf("The eu-central-1 region only works with v4 
authentication") + } + } + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. + if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.s3Path(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) + if err != nil { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + partNumber := 1 + bytesRead := 0 + var putErrChan chan error + parts := []s3.Part{} + var part s3.Part + done := make(chan struct{}) // stopgap to free up waiting goroutines + + multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return 0, err + } + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. 
+ // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. + defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the s3 + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying s3 library should handle it, it doesn't seem to + // be part of the shouldRetry function (see AdRoll/goamz/s3). + defer func() { + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } + }() + + if bytesRead <= 0 { + return + } + + var err error + var part s3.Part + + loop: + for retries := 0; retries < 5; retries++ { + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) + if err == nil { + break // success! + } + + // NOTE(stevvooe): This retry code tries to only retry under + // conditions where the s3 package does not. We may add s3 + // error codes to the below if we see others bubble up in the + // application. Right now, the most troubling is + // RequestTimeout, which seems to only triggered when a tcp + // connection to s3 slows to a crawl. If the RequestTimeout + // ends up getting added to the s3 library and we don't see + // other errors, this retry loop can be removed. + switch err := err.(type) { + case *s3.Error: + switch err.Code { + case "RequestTimeout": + // allow retries on only this error. 
+ default: + break loop + } + } + + backoff := 100 * time.Millisecond * time.Duration(retries+1) + logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) + time.Sleep(backoff) + } + + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. + parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.s3Path(path), nil) + if err != nil { + if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Name+"/"+d.s3Path(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > 
currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + s3.CopyOptions{}, + d.Bucket.Name+"/"+d.s3Path(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) + if err != nil { + return nil, err + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. 
*/
+	_, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(),
+		s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
+	if err != nil {
+		return parseError(sourcePath, err)
+	}
+
+	return d.Delete(ctx, sourcePath)
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(ctx context.Context, path string) error {
+	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
+	if err != nil || len(listResponse.Contents) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	s3Objects := make([]s3.Object, listMax)
+
+	for len(listResponse.Contents) > 0 {
+		for index, key := range listResponse.Contents {
+			s3Objects[index].Key = key.Key
+		}
+
+		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
+		if err != nil {
+			return err
+		}
+
+		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return ErrUnsupportedMethod in certain StorageDriver implementations.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	methodString := "GET"
+	method, ok := options["method"]
+	if ok {
+		methodString, ok = method.(string)
+		if !ok || (methodString != "GET" && methodString != "HEAD") {
+			return "", storagedriver.ErrUnsupportedMethod
+		}
+	}
+
+	expiresTime := time.Now().Add(20 * time.Minute)
+	expires, ok := options["expiry"]
+	if ok {
+		et, ok := expires.(time.Time)
+		if ok {
+			expiresTime = et
+		}
+	}
+
+	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
+}
+
+func (d *driver) s3Path(path string) string {
+	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+// S3BucketKey returns the s3 bucket key for the given storage driver path.
+func (d *Driver) S3BucketKey(path string) string {
+	return d.StorageDriver.(*driver).s3Path(path)
+}
+
+func parseError(path string, err error) error {
+	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	return err
+}
+
+func hasCode(err error, code string) bool {
+	s3err, ok := err.(*aws.Error)
+	return ok && s3err.Code == code
+}
+
+func (d *driver) getOptions() s3.Options {
+	return s3.Options{SSE: d.Encrypt}
+}
+
+func getPermissions() s3.ACL {
+	return s3.Private
+}
+
+func (d *driver) getContentType() string {
+	return "application/octet-stream"
+}
+
+// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
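+// Buffers are recycled through a sync.Pool; putbuf zeroes a buffer (by
+// copying from d.zeros) before returning it to the pool, so a reused buffer
+// never leaks bytes from a previous upload.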
+func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go new file mode 100644 index 00000000..70172a6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go @@ -0,0 +1,138 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/AdRoll/goamz/aws" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory string) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + secure := os.Getenv("S3_SECURE") + v4auth := os.Getenv("S3_USE_V4_AUTH") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4AuthBool := false + if v4auth != "" { + v4AuthBool, err = strconv.ParseBool(v4auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + aws.GetRegion(region), + encryptBool, + secureBool, + v4AuthBool, + minChunkSize, + rootDirectory, + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, 
"/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go new file mode 100644 index 00000000..bade099f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go @@ -0,0 +1,125 @@ +package driver + +import ( + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution/context" +) + +// Version is a string representing the storage driver version, of the form +// Major.Minor. +// The registry must accept storage drivers with equal major version and greater +// minor version, but may not be compatible with older storage driver versions. +type Version string + +// Major returns the major (primary) component of a version. +func (version Version) Major() uint { + majorPart := strings.Split(string(version), ".")[0] + major, _ := strconv.ParseUint(majorPart, 10, 0) + return uint(major) +} + +// Minor returns the minor (secondary) component of a version. +func (version Version) Minor() uint { + minorPart := strings.Split(string(version), ".")[1] + minor, _ := strconv.ParseUint(minorPart, 10, 0) + return uint(minor) +} + +// CurrentVersion is the current storage driver Version. +const CurrentVersion Version = "0.1" + +// StorageDriver defines methods that a Storage Driver must implement for a +// filesystem-like key/value object storage. +type StorageDriver interface { + // Name returns the human-readable "name" of the driver, useful in error + // messages and logging. By convention, this will just be the registration + // name, but drivers may provide other information here. + Name() string + + // GetContent retrieves the content stored at "path" as a []byte. + // This should primarily be used for small objects. + GetContent(ctx context.Context, path string) ([]byte, error) + + // PutContent stores the []byte content at a location designated by "path". + // This should primarily be used for small objects. + PutContent(ctx context.Context, path string, content []byte) error + + // ReadStream retrieves an io.ReadCloser for the content stored at "path" + // with a given byte offset. + // May be used to resume reading a stream by providing a nonzero offset. + ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) + + // WriteStream stores the contents of the provided io.ReadCloser at a + // location designated by the given path. + // May be used to resume writing a stream by providing a nonzero offset. + // The offset must be no larger than the CurrentSize for this path. + WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) + + // Stat retrieves the FileInfo for the given path, including the current + // size in bytes and the creation time. + Stat(ctx context.Context, path string) (FileInfo, error) + + // List returns a list of the objects that are direct descendants of the + //given path. 
+ List(ctx context.Context, path string) ([]string, error) + + // Move moves an object stored at sourcePath to destPath, removing the + // original object. + // Note: This may be no more efficient than a copy followed by a delete for + // many implementations. + Move(ctx context.Context, sourcePath string, destPath string) error + + // Delete recursively deletes all objects stored at "path" and its subpaths. + Delete(ctx context.Context, path string) error + + // URLFor returns a URL which may be used to retrieve the content stored at + // the given path, possibly using the given options. + // May return an ErrUnsupportedMethod in certain StorageDriver + // implementations. + URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) +} + +// PathRegexp is the regular expression which each file path must match. A +// file path is absolute, beginning with a slash and containing a positive +// number of path components separated by slashes, where each component is +// restricted to lowercase alphanumeric characters or a period, underscore, or +// hyphen. +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) + +// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. +var ErrUnsupportedMethod = errors.New("unsupported method") + +// PathNotFoundError is returned when operating on a nonexistent path. +type PathNotFoundError struct { + Path string +} + +func (err PathNotFoundError) Error() string { + return fmt.Sprintf("Path not found: %s", err.Path) +} + +// InvalidPathError is returned when the provided path is malformed. +type InvalidPathError struct { + Path string +} + +func (err InvalidPathError) Error() string { + return fmt.Sprintf("Invalid path: %s", err.Path) +} + +// InvalidOffsetError is returned when attempting to read or write from an +// invalid offset. +type InvalidOffsetError struct { + Path string + Offset int64 +} + +func (err InvalidOffsetError) Error() string { + return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go new file mode 100644 index 00000000..0921ccc0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go @@ -0,0 +1,657 @@ +// Package swift provides a storagedriver.StorageDriver implementation to +// store blobs in Openstack Swift object storage. +// +// This package leverages the ncw/swift client library for interfacing with +// Swift. +// +// It supports both TempAuth authentication and Keystone authentication +// (up to version 3). +// +// Since Swift has no concept of directories (directories are an abstration), +// empty objects are created with the MIME type application/vnd.swift.directory. +// +// As Swift has a limit on the size of a single uploaded object (by default +// this is 5GB), the driver makes use of the Swift Large Object Support +// (http://docs.openstack.org/developer/swift/overview_large_objects.html). +// Only one container is used for both manifests and data objects. Manifests +// are stored in the 'files' pseudo directory, data objects are stored under +// 'segments'. 
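+//
+// Concretely, a file "f" is a zero-byte manifest object at
+// "<prefix>/files/f" whose X-Object-Manifest header points at the
+// "<container>/<prefix>/segments/..." objects holding the data (see
+// swiftPath, swiftSegmentPath and createManifest below).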
+package swift + +import ( + "bytes" + "crypto/rand" + "crypto/sha1" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + gopath "path" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/ncw/swift" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" +) + +const driverName = "swift" + +// defaultChunkSize defines the default size of a segment +const defaultChunkSize = 20 * 1024 * 1024 + +// minChunkSize defines the minimum size of a segment +const minChunkSize = 1 << 20 + +// Parameters A struct that encapsulates all of the driver parameters after all values have been set +type Parameters struct { + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + Region string + Container string + Prefix string + InsecureSkipVerify bool + ChunkSize int +} + +type swiftInfo map[string]interface{} + +func init() { + factory.Register(driverName, &swiftDriverFactory{}) +} + +// swiftDriverFactory implements the factory.StorageDriverFactory interface +type swiftDriverFactory struct{} + +func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + ChunkSize int +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift +// Objects are stored at absolute keys in the provided container. 
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - username
+// - password
+// - authurl
+// - container
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	params := Parameters{
+		ChunkSize:          defaultChunkSize,
+		InsecureSkipVerify: false,
+	}
+
+	if err := mapstructure.Decode(parameters, &params); err != nil {
+		return nil, err
+	}
+
+	if params.Username == "" {
+		return nil, fmt.Errorf("No username parameter provided")
+	}
+
+	if params.Password == "" {
+		return nil, fmt.Errorf("No password parameter provided")
+	}
+
+	if params.AuthURL == "" {
+		return nil, fmt.Errorf("No authurl parameter provided")
+	}
+
+	if params.Container == "" {
+		return nil, fmt.Errorf("No container parameter provided")
+	}
+
+	if params.ChunkSize < minChunkSize {
+		return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize)
+	}
+
+	return New(params)
+}
+
+// New constructs a new Driver with the given Openstack Swift credentials and container name
+func New(params Parameters) (*Driver, error) {
+	transport := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		MaxIdleConnsPerHost: 2048,
+		TLSClientConfig:     &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify},
+	}
+
+	ct := swift.Connection{
+		UserName:       params.Username,
+		ApiKey:         params.Password,
+		AuthUrl:        params.AuthURL,
+		Region:         params.Region,
+		UserAgent:      "distribution/" + version.Version,
+		Tenant:         params.Tenant,
+		TenantId:       params.TenantID,
+		Domain:         params.Domain,
+		DomainId:       params.DomainID,
+		Transport:      transport,
+		ConnectTimeout: 60 * time.Second,
+		Timeout:        15 * 60 * time.Second,
+	}
+	err := ct.Authenticate()
+	if err != nil {
+		return nil, fmt.Errorf("Swift authentication failed: %s", err)
+	}
+
+	if err := ct.ContainerCreate(params.Container, nil); err != nil {
+		return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err)
+	}
+
+	d := &driver{
+		Conn:              ct,
+		Container:         params.Container,
+		Prefix:            params.Prefix,
+		BulkDeleteSupport: detectBulkDelete(params.AuthURL),
+		ChunkSize:         params.ChunkSize,
+	}
+
+	return &Driver{
+		baseEmbed: baseEmbed{
+			Base: base.Base{
+				StorageDriver: d,
+			},
+		},
+	}, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+	content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path))
+	if err == swift.ObjectNotFound {
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+	return content, err
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+	err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType())
+	if err == swift.ObjectNotFound {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+	return err
+}
+
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
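+//
+// Ranged reads are delegated to Swift through an HTTP Range header of the
+// form "bytes=<offset>-"; a request past the end of the object returns an
+// empty reader rather than an error (HTTP 416 is swallowed).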
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" + + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return file, err +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { + var ( + segments []swift.Object + multi io.Reader + paddingReader io.Reader + currentLength int64 + cursor int64 + segmentPath string + ) + + partNumber := 1 + chunkSize := int64(d.ChunkSize) + zeroBuf := make([]byte, d.ChunkSize) + + getSegment := func() string { + return fmt.Sprintf("%s/%016d", segmentPath, partNumber) + } + + max := func(a int64, b int64) int64 { + if a > b { + return a + } + return b + } + + createManifest := true + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + manifest, ok := headers["X-Object-Manifest"] + if !ok { + if segmentPath, err = d.swiftSegmentPath(path); err != nil { + return 0, err + } + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { + return 0, err + } + segments = append(segments, info) + } else { + _, segmentPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentPath); err != nil { + return 0, err + } + createManifest = false + } + currentLength = info.Bytes + } else if err == swift.ObjectNotFound { + if segmentPath, err = d.swiftSegmentPath(path); err != nil { + return 0, err + } + } else { + return 0, err + } + + if createManifest { + if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { + return 0, err + } + } + + // First, we skip the existing segments that are not modified by this call + for i := range segments { + if offset < cursor+segments[i].Bytes { + break + } + cursor += segments[i].Bytes + partNumber++ + } + + // We reached the end of the file but we haven't reached 'offset' yet + // Therefore we add blocks of zeros + if offset >= currentLength { + for offset-currentLength >= chunkSize { + // Insert a block a zero + _, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) + if err != nil { + if err == swift.ObjectNotFound { + return 0, storagedriver.PathNotFoundError{Path: getSegment()} + } + return 0, err + } + currentLength += chunkSize + partNumber++ + } + + cursor = currentLength + paddingReader = bytes.NewReader(zeroBuf) + } else if offset-cursor > 0 { + // Offset is inside the current segment : we need to read the + // data from the beginning of the segment to offset + file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) + if err != nil { + if err == swift.ObjectNotFound { + return 0, 
storagedriver.PathNotFoundError{Path: getSegment()} + } + return 0, err + } + defer file.Close() + paddingReader = file + } + + readers := []io.Reader{} + if paddingReader != nil { + readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) + } + readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) + multi = io.MultiReader(readers...) + + writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { + currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) + if err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} + } + return false, bytesRead, err + } + + n, err := io.Copy(currentSegment, multi) + if err != nil { + return false, bytesRead, err + } + + if n > 0 { + defer currentSegment.Close() + bytesRead += n - max(0, offset-cursor) + } + + if n < chunkSize { + // We wrote all the data + if cursor+n < currentLength { + // Copy the end of the chunk + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err + } + + _, copyErr := io.Copy(currentSegment, file) + + if err := file.Close(); err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err + } + + if copyErr != nil { + return false, bytesRead, copyErr + } + } + + return true, bytesRead, nil + } + + multi = io.LimitReader(reader, chunkSize) + cursor += chunkSize + partNumber++ + + return false, bytesRead, nil + } + + finished := false + read := int64(0) + bytesRead := int64(0) + for finished == false { + finished, read, err = writeSegment(getSegment()) + bytesRead += read + if err != nil { + return bytesRead, err + } + } + + return bytesRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), + } + + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // On Swift 1.12, the 'bytes' field is always 0 + // so we need to do a second HEAD request + info, _, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + } + + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given path. 
+func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Prefix: prefix, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + for _, obj := range objects { + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) + } + + if err == swift.ContainerNotFound { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path) + "/", + } + + objects, err := d.Conn.ObjectsAll(d.Container, &opts) + if err != nil { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + + if d.BulkDeleteSupport { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } + + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + segContainer, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) + if err != nil { + return err + } + + for _, s := range segments { + if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: s.Name} + } + return err + } + } + } + } else { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
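Before the URLFor stub below, a note on the manifest handling above: Move and Delete both special-case the X-Object-Manifest header because a Swift Dynamic Large Object is a zero-byte manifest whose header value names "<container>/<prefix>", with the real data living in segment objects under that prefix, so the segments must travel with (or die with) the manifest. A sketch of that detection (helper name invented; it uses only swift calls already seen in this file):

// Sketch: report where a DLO's segments live; ok is false for plain objects.
func segmentsLocation(conn *swift.Connection, container, name string) (segContainer, prefix string, ok bool, err error) {
	_, headers, err := conn.Object(container, name) // metadata-only lookup
	if err != nil {
		return "", "", false, err
	}
	manifest, ok := headers["X-Object-Manifest"]
	if !ok {
		return "", "", false, nil // not a manifest; data is stored inline
	}
	// The header value has the form "<container>/<prefix>", the same shape
	// parseManifest assumes.
	parts := strings.SplitN(manifest, "/", 2)
	segContainer = parts[0]
	if len(parts) > 1 {
		prefix = parts[1]
	}
	return segContainer, prefix, true, nil
}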
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return segments, err +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func detectBulkDelete(authURL string) (bulkDelete bool) { + resp, err := http.Get(gopath.Join(authURL, "..", "..") + "/info") + if err == nil { + defer resp.Body.Close() + decoder := json.NewDecoder(resp.Body) + var infos swiftInfo + if decoder.Decode(&infos) == nil { + _, bulkDelete = infos["bulk_delete"] + } + } + return +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go new file mode 100644 index 00000000..6be2238a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go @@ -0,0 +1,135 @@ +package swift + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/ncw/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
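A note on detectBulkDelete above before the test wiring below: it derives the cluster's capability endpoint from the auth URL and only cares whether a "bulk_delete" key is present in the JSON capability document. A minimal, runnable illustration of that shape (the payload here is invented for demonstration; real clusters return many more middleware entries):

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative /info payload; only the presence of "bulk_delete" matters.
const sampleInfo = `{"swift":{"version":"2.2.0"},"bulk_delete":{"max_deletes_per_request":10000}}`

func main() {
	var infos map[string]interface{}
	if err := json.Unmarshal([]byte(sampleInfo), &infos); err == nil {
		_, hasBulkDelete := infos["bulk_delete"]
		fmt.Println(hasBulkDelete) // true
	}
}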
+func Test(t *testing.T) { check.TestingT(t) } + +var swiftDriverConstructor func(prefix string) (*Driver, error) + +func init() { + var ( + username string + password string + authURL string + tenant string + tenantID string + domain string + domainID string + container string + region string + insecureSkipVerify bool + swiftServer *swifttest.SwiftServer + err error + ) + username = os.Getenv("SWIFT_USERNAME") + password = os.Getenv("SWIFT_PASSWORD") + authURL = os.Getenv("SWIFT_AUTH_URL") + tenant = os.Getenv("SWIFT_TENANT_NAME") + tenantID = os.Getenv("SWIFT_TENANT_ID") + domain = os.Getenv("SWIFT_DOMAIN_NAME") + domainID = os.Getenv("SWIFT_DOMAIN_ID") + container = os.Getenv("SWIFT_CONTAINER_NAME") + region = os.Getenv("SWIFT_REGION_NAME") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) + + if username == "" || password == "" || authURL == "" || container == "" { + if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { + panic(err) + } + username = "swifttest" + password = "swifttest" + authURL = swiftServer.AuthURL + container = "test" + } + + prefix, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(prefix) + + swiftDriverConstructor = func(root string) (*Driver, error) { + parameters := Parameters{ + username, + password, + authURL, + tenant, + tenantID, + domain, + domainID, + region, + container, + root, + insecureSkipVerify, + defaultChunkSize, + } + + return New(parameters) + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return swiftDriverConstructor(prefix) + } + + testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) +} + +func TestEmptyRootList(t *testing.T) { + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := swiftDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := swiftDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := swiftDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go new file mode 100644 index 00000000..770c428c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go @@ -0,0 +1,1163 @@ +package testsuites + +import ( + "bytes" + "crypto/sha1" + "io" + "io/ioutil" + "math/rand" + 
"net/http" + "os" + "path" + "sort" + "sync" + "testing" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "gopkg.in/check.v1" +) + +// Test hooks up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +// RegisterSuite registers an in-process storage driver test suite with +// the go test runner. +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { + check.Suite(&DriverSuite{ + Constructor: driverConstructor, + SkipCheck: skipCheck, + ctx: context.Background(), + }) +} + +// SkipCheck is a function used to determine if a test suite should be skipped. +// If a SkipCheck returns a non-empty skip reason, the suite is skipped with +// the given reason. +type SkipCheck func() (reason string) + +// NeverSkip is a default SkipCheck which never skips the suite. +var NeverSkip SkipCheck = func() string { return "" } + +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. +type DriverConstructor func() (storagedriver.StorageDriver, error) + +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. +type DriverTeardown func() error + +// DriverSuite is a gocheck test suite designed to test a +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. +type DriverSuite struct { + Constructor DriverConstructor + Teardown DriverTeardown + SkipCheck + storagedriver.StorageDriver + ctx context.Context +} + +// SetUpSuite sets up the gocheck test suite. +func (suite *DriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } + d, err := suite.Constructor() + c.Assert(err, check.IsNil) + suite.StorageDriver = d +} + +// TearDownSuite tears down the gocheck test suite. +func (suite *DriverSuite) TearDownSuite(c *check.C) { + if suite.Teardown != nil { + err := suite.Teardown() + c.Assert(err, check.IsNil) + } +} + +// TearDownTest tears down the gocheck test. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest(c *check.C) { + files, _ := suite.StorageDriver.List(suite.ctx, "/") + if len(files) > 0 { + c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. +func (suite *DriverSuite) TestValidPaths(c *check.C) { + contents := randomContents(64) + validFiles := []string{ + "/a", + "/2", + "/aa", + "/a.a", + "/0-9/abcdefg", + "/abcdefg/z.75", + "/abc/1.2.3.4.5-6_zyx/123.z/4", + "/docker/docker-registry", + "/123.abc", + "/abc./abc", + "/.abc", + "/a--b", + "/a-.b", + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba"} + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. 
+func (suite *DriverSuite) TestInvalidPaths(c *check.C) { + contents := randomContents(64) + invalidFiles := []string{ + "", + "/", + "abc", + "123.abc", + "//bcd", + "/abc_123/"} + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + } +} + +// TestWriteRead1 tests a simple write-read workflow. +func (suite *DriverSuite) TestWriteRead1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead2 tests a simple write-read workflow with unicode data. +func (suite *DriverSuite) TestWriteRead2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead3 tests a simple write-read workflow with a small string. +func (suite *DriverSuite) TestWriteRead3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. +func (suite *DriverSuite) TestWriteRead4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(c, filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) + + contents = randomContents(1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestReadNonexistent tests reading content from an empty path. +func (suite *DriverSuite) TestReadNonexistent(c *check.C) { + filename := randomPath(32) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestWriteReadStreams1 tests a simple write-read streaming workflow. +func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data. +func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data. +func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data. 
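The numbered WriteRead and WriteReadStreams cases above and below vary only in their payload, which makes the pattern easy to extend as a table; a sketch against the suite's own helpers (not part of the registered suite):

// Sketch: the per-payload write-read cases expressed as one table.
func (suite *DriverSuite) testWriteReadTable(c *check.C) {
	payloads := [][]byte{
		[]byte("a"),                 // single byte
		[]byte("\xc3\x9f"),          // unicode
		{0x80, 0x80, 0x80, 0x80},    // non-utf8
		randomContents(1024 * 1024), // 1MB
	}
	for _, contents := range payloads {
		suite.writeReadCompare(c, randomPath(32), contents)
	}
}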
+func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(1024 * 1024)
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the
+// storage driver safely.
+func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte{0x80, 0x80, 0x80, 0x80}
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage
+// driver safely.
+func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
+	if testing.Short() {
+		c.Skip("Skipping test in short mode")
+	}
+
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	checksum := sha1.New()
+	var fileSize int64 = 5 * 1024 * 1024 * 1024
+
+	contents := newRandReader(fileSize)
+	written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum))
+	c.Assert(err, check.IsNil)
+	c.Assert(written, check.Equals, fileSize)
+
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	writtenChecksum := sha1.New()
+	io.Copy(writtenChecksum, reader)
+
+	c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
+}
+
+// TestReadStreamWithOffset tests that the appropriate data is streamed when
+// reading with a given offset.
+func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	chunkSize := int64(32)
+
+	contentsChunk1 := randomContents(chunkSize)
+	contentsChunk2 := randomContents(chunkSize)
+	contentsChunk3 := randomContents(chunkSize)
+
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+	c.Assert(err, check.IsNil)
+
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err := ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err = ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))
+
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err = ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+	c.Assert(readContents, check.DeepEquals, contentsChunk3)
+
+	// Ensure we get an invalid offset error for negative offsets.
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1)
+	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
+	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
+	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
+	c.Assert(reader, check.IsNil)
+
+	// Read past the end of the content and make sure we get a reader that
+	// returns 0 bytes and io.EOF
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	buf := make([]byte, chunkSize)
+	n, err := reader.Read(buf)
+	c.Assert(err, check.Equals, io.EOF)
+	c.Assert(n, check.Equals, 0)
+
+	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	n, err = reader.Read(buf)
+	c.Assert(n, check.Equals, 1)
+
+	// We don't care whether the io.EOF comes on this read or the first
+	// zero read, but the only error acceptable here is io.EOF.
+	if err != nil {
+		c.Assert(err, check.Equals, io.EOF)
+	}
+
+	// Any more reads should result in zero bytes and io.EOF
+	n, err = reader.Read(buf)
+	c.Assert(n, check.Equals, 0)
+	c.Assert(err, check.Equals, io.EOF)
+}
+
+// TestContinueStreamAppendLarge tests that a stream write can be appended to without
+// corrupting the data with a large chunk size.
+func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) {
+	suite.testContinueStreamAppend(c, int64(10*1024*1024))
+}
+
+// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only
+// with a tiny chunk size in order to test corner cases for some cloud storage drivers.
+func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) {
+	suite.testContinueStreamAppend(c, int64(32))
+}
+
+func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) {
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	contentsChunk1 := randomContents(chunkSize)
+	contentsChunk2 := randomContents(chunkSize)
+	contentsChunk3 := randomContents(chunkSize)
+	contentsChunk4 := randomContents(chunkSize)
+	zeroChunk := make([]byte, int64(chunkSize))
+
+	fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)
+ + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk1))) + + fi, err := suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) + + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, 2*chunkSize) + + // Test re-writing the last chunk + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, 2*chunkSize) + + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, fullContents) + + // Writing past size of file extends file (no offset error). We would like + // to write chunk 4 one chunk length past chunk 3. It should be successful + // and the resulting file will be 5 chunks long, with a chunk of all + // zeros. + + fullContents = append(fullContents, zeroChunk...) + fullContents = append(fullContents, contentsChunk4...) + + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, chunkSize) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) + + received, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(len(received), check.Equals, len(fullContents)) + c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) + c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) + c.Assert(received, check.DeepEquals, fullContents) + + // Ensure that negative offsets return correct error. + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) + c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) + c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails. 
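testContinueStreamAppend above pins down a subtle part of the WriteStream contract, exercised again by the nonexistent-stream test below: writing at an offset beyond the current size must zero-fill the gap rather than fail. A self-contained model of the expected resulting layout (plain slice arithmetic, independent of any driver):

// Sketch: the byte layout a driver must produce after writing p at offset
// off. Gaps are zero-filled; overlapping ranges are overwritten.
func applyWrite(file []byte, off int64, p []byte) []byte {
	if end := off + int64(len(p)); int64(len(file)) < end {
		grown := make([]byte, end)
		copy(grown, file) // bytes between len(file) and off stay zero
		file = grown
	}
	copy(file[off:], p)
	return file
}

Applying this model to the test above, writing chunk4 one chunk length past the end of three chunks yields the original data, a zero chunk, then chunk4: exactly the five-chunk expectation asserted there.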
+func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { + filename := randomPath(32) + + _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + + _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestList checks the returned list of keys after populating a directory tree. +func (suite *DriverSuite) TestList(c *check.C) { + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.StorageDriver.Delete(suite.ctx, rootDirectory) + + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) + c.Assert(err, check.IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(suite.ctx, "/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) + c.Assert(err, check.IsNil) + + sort.Strings(keys) + c.Assert(keys, check.DeepEquals, childFiles) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. 
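The TODO at the end of TestList above names invariants that are easy to assert mechanically: every returned key is absolute and is a direct child of the listed directory. A sketch of such a helper (hypothetical, not part of the suite; parent carries no trailing slash):

// Sketch: assert the List contract spelled out in the TODO above.
func assertDirectChildren(c *check.C, parent string, keys []string) {
	for _, k := range keys {
		if len(k) == 0 || k[0] != '/' {
			c.Errorf("key %q is not an absolute path", k)
		}
		if path.Dir(k) != parent {
			c.Errorf("key %q is not a direct child of %q", k, parent)
		}
	}
}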
+func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, sourceContents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. +func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) +} + +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.StorageDriver.Delete(suite.ctx, "/notadir") + + // Now try to move a non-existent file under it. 
+ err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") + c.Assert(err, check.NotNil) // non-nil error +} + +// TestDelete checks that the delete operation removes data from the storage +// driver +func (suite *DriverSuite) TestDelete(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestURLFor checks that the URLFor method functions properly, but only if it +// is implemented +func (suite *DriverSuite) TestURLFor(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) + if err == storagedriver.ErrUnsupportedMethod { + return + } + c.Assert(err, check.IsNil) + + response, err := http.Get(url) + c.Assert(err, check.IsNil) + defer response.Body.Close() + + read, err := ioutil.ReadAll(response.Body) + c.Assert(err, check.IsNil) + c.Assert(read, check.DeepEquals, contents) + + url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) + if err == storagedriver.ErrUnsupportedMethod { + return + } + c.Assert(err, check.IsNil) + + response, err = http.Head(url) + c.Assert(response.StatusCode, check.Equals, 200) + c.Assert(response.ContentLength, check.Equals, int64(32)) +} + +// TestDeleteNonexistent checks that removing a nonexistent key fails. +func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { + filename := randomPath(32) + err := suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestDeleteFolder checks that deleting a folder removes all child elements. 
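An aside on TestURLFor above before the folder-deletion test: ErrUnsupportedMethod is treated as a clean opt-out rather than a failure, and callers are expected to mirror that. A sketch of the caller-side fallback (function name invented; the driver interfaces are the ones used throughout this file):

// Sketch: prefer a direct URL when the driver can mint one, otherwise
// stream the content through the driver itself.
func openBlob(ctx context.Context, d storagedriver.StorageDriver, path string) (io.ReadCloser, error) {
	url, err := d.URLFor(ctx, path, nil)
	switch err {
	case nil:
		resp, httpErr := http.Get(url) // e.g. a signed or temporary URL
		if httpErr != nil {
			return nil, httpErr
		}
		return resp.Body, nil
	case storagedriver.ErrUnsupportedMethod:
		return d.ReadStream(ctx, path, 0) // driver cannot mint URLs
	default:
		return nil, err
	}
}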
+func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
+	dirname := randomPath(32)
+	filename1 := randomPath(32)
+	filename2 := randomPath(32)
+	filename3 := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname))
+
+	err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1))
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(suite.ctx, dirname)
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+}
+
+// TestStatCall verifies the implementation of the storagedriver's Stat call.
+func (suite *DriverSuite) TestStatCall(c *check.C) {
+	content := randomContents(4096)
+	dirPath := randomPath(32)
+	fileName := randomFilename(32)
+	filePath := path.Join(dirPath, fileName)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath))
+
+	// Call on non-existent file/dir, check error.
+	fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(fi, check.IsNil)
+
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(fi, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
+	c.Assert(err, check.IsNil)
+
+	// Call on regular file, check results
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, filePath)
+	c.Assert(fi.Size(), check.Equals, int64(len(content)))
+	c.Assert(fi.IsDir(), check.Equals, false)
+	createdTime := fi.ModTime()
+
+	// Sleep and modify the file
+	time.Sleep(time.Second * 10)
+	content = randomContents(4096)
+	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
+	c.Assert(err, check.IsNil)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
+
+	// Check if the modification time is after the creation time.
+	// In case of cloud storage services, storage frontend nodes might have
+	// time drift between them; however, that should be solved by sleeping
+	// before the update.
+	modTime := fi.ModTime()
+	if !modTime.After(createdTime) {
+		c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
+	}
+
+	// Call on directory (do not check ModTime as dirs don't need to support it)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, dirPath)
+	c.Assert(fi.Size(), check.Equals, int64(0))
+	c.Assert(fi.IsDir(), check.Equals, true)
+}
+
+// TestPutContentMultipleTimes checks that the storage driver can overwrite
+// content on subsequent puts. Validates that PutContent does not have to work
+// with an offset like WriteStream does and overwrites the file entirely
+// rather than writing the data to the [0,len(data)) of the file.
+func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(4096)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	contents = randomContents(2048) // upload a different, smaller file
+	err = suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(readContents, check.DeepEquals, contents)
+}
+
+// TestConcurrentStreamReads checks that multiple clients can safely read from
+// the same file simultaneously with various offsets.
+func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) {
+	var filesize int64 = 128 * 1024 * 1024
+
+	if testing.Short() {
+		filesize = 10 * 1024 * 1024
+		c.Log("Reducing file size to 10MB for short mode")
+	}
+
+	filename := randomPath(32)
+	contents := randomContents(filesize)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	var wg sync.WaitGroup
+
+	readContents := func() {
+		defer wg.Done()
+		offset := rand.Int63n(int64(len(contents)))
+		reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
+		c.Assert(err, check.IsNil)
+
+		readContents, err := ioutil.ReadAll(reader)
+		c.Assert(err, check.IsNil)
+		c.Assert(readContents, check.DeepEquals, contents[offset:])
+	}
+
+	wg.Add(10)
+	for i := 0; i < 10; i++ {
+		go readContents()
+	}
+	wg.Wait()
+}
+
+// TestConcurrentFileStreams checks that multiple *os.File objects can be passed
+// in to WriteStream concurrently without hanging.
+func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) {
+	numStreams := 32
+
+	if testing.Short() {
+		numStreams = 8
+		c.Log("Reducing number of streams to 8 for short mode")
+	}
+
+	var wg sync.WaitGroup
+
+	testStream := func(size int64) {
+		defer wg.Done()
+		suite.testFileStreams(c, size)
+	}
+
+	wg.Add(numStreams)
+	for i := numStreams; i > 0; i-- {
+		go testStream(int64(numStreams) * 1024 * 1024)
+	}
+
+	wg.Wait()
+}
+
+// TestEventualConsistency checks that if stat says that a file is a certain size, then
+// you can freely read from the file (this is the only guarantee that the driver needs to provide)
+func (suite *DriverSuite) TestEventualConsistency(c *check.C) {
+	if testing.Short() {
+		c.Skip("Skipping test in short mode")
+	}
+
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	var offset int64
+	var misswrites int
+	var chunkSize int64 = 32
+
+	for i := 0; i < 1024; i++ {
+		contents := randomContents(chunkSize)
+		read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents))
+		c.Assert(err, check.IsNil)
+
+		fi, err := suite.StorageDriver.Stat(suite.ctx, filename)
+		c.Assert(err, check.IsNil)
+
+		// We are most concerned with being able to read data as soon as Stat declares
+		// it is uploaded. This is the strongest guarantee that some drivers (that guarantee
+		// at best eventual consistency) absolutely need to provide.
+		if fi.Size() == offset+chunkSize {
+			reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
+			c.Assert(err, check.IsNil)
+
+			readContents, err := ioutil.ReadAll(reader)
+			c.Assert(err, check.IsNil)
+
+			c.Assert(readContents, check.DeepEquals, contents)
+
+			reader.Close()
+			offset += read
+		} else {
+			misswrites++
+		}
+	}
+
+	if misswrites > 0 {
+		c.Logf("There were %d occurrences of a write not being instantly available.", misswrites)
+	}
+
+	c.Assert(misswrites, check.Not(check.Equals), 1024)
+}
+
+// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files
+func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 0)
+}
+
+// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files
+func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024)
+}
+
+// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files
+func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024*1024)
+}
+
+// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files
+func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024*1024*1024)
+}
+
+func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
+	c.SetBytes(size)
+	parentDir := randomPath(8)
+	defer func() {
+		c.StopTimer()
+		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
+	}()
+
+	for i := 0; i < c.N; i++ {
+		filename := path.Join(parentDir, randomPath(32))
+		err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size))
+		c.Assert(err, check.IsNil)
+
+		_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
+		c.Assert(err, check.IsNil)
+	}
+}
+
+// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files
+func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 0)
+}
+
+// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files
+func (suite *DriverSuite)
BenchmarkStream1KBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024) +} + +// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files +func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024) +} + +// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files +func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, size) + + rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + rc.Close() + } +} + +// BenchmarkList5Files benchmarks List for 5 small files +func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { + suite.benchmarkListFiles(c, 5) +} + +// BenchmarkList50Files benchmarks List for 50 small files +func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { + suite.benchmarkListFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(suite.ctx, parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, size) + + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + 
readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contents))) + + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} + +func randomContents(length int64) []byte { + b := make([]byte, length) + for i := range b { + b[i] = byte(rand.Intn(2 << 8)) + } + return b +} + +type randReader struct { + r int64 + m sync.Mutex +} + +func (rr *randReader) Read(p []byte) (n int, err error) { + rr.m.Lock() + defer rr.m.Unlock() + for i := 0; i < len(p) && rr.r > 0; i++ { + p[i] = byte(rand.Intn(255)) + n++ + rr.r-- + } + if rr.r == 0 { + err = io.EOF + } + return +} + +func newRandReader(n int64) *randReader { + return &randReader{r: n} +} + +func firstPart(filePath string) string { + if filePath == "" { + return "/" + } + for { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + dir, file := path.Split(filePath) + if dir == "" && file == "" { + return "/" + } + if dir == "/" || dir == "" { + return "/" + file + } + if file == "" { + return dir + } + filePath = dir + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go new file mode 100644 index 00000000..b3a5f520 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go @@ -0,0 +1,177 @@ +package storage + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): Set an optimal buffer size here. 
We'll have to
+// understand the latency characteristics of the underlying network to
+// set this correctly, so we may want to leave it to the driver. For
+// out of process drivers, we'll have to optimize this buffer size for
+// local communication.
+const fileReaderBufferSize = 4 << 20
+
+// fileReader provides a read seeker interface to files stored in
+// storagedriver. Used to implement part of layer interface and will be used
+// to implement read side of LayerUpload.
+type fileReader struct {
+	driver storagedriver.StorageDriver
+
+	ctx context.Context
+
+	// identifying fields
+	path string
+	size int64 // size is the total size, must be set.
+
+	// mutable fields
+	rc     io.ReadCloser // remote read closer
+	brd    *bufio.Reader // internal buffered io
+	offset int64         // offset is the current read offset
+	err    error         // terminal error, if set, reader is closed
+}
+
+// newFileReader initializes a file reader for the remote file. The reader
+// takes on the size and path that must be determined externally with a stat
+// call. The reader operates optimistically, assuming that the file is already
+// there.
+func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
+	return &fileReader{
+		ctx:    ctx,
+		driver: driver,
+		path:   path,
+		size:   size,
+	}, nil
+}
+
+func (fr *fileReader) Read(p []byte) (n int, err error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	rd, err := fr.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	fr.offset += int64(n)
+
+	// Simulate an io.EOF error if we reach filesize.
+	if err == nil && fr.offset >= fr.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	var err error
+	newOffset := fr.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = fr.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else {
+		if fr.offset != newOffset {
+			fr.reset()
+		}
+
+		// No problems, set the offset.
+		fr.offset = newOffset
+	}
+
+	return fr.offset, err
+}
+
+func (fr *fileReader) Close() error {
+	return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
+}
+
+// reader prepares a reader at the current offset, ensuring it's buffered
+// and ready to go.
+func (fr *fileReader) reader() (io.Reader, error) {
+	if fr.err != nil {
+		return nil, fr.err
+	}
+
+	if fr.rc != nil {
+		return fr.brd, nil
+	}
+
+	// If we don't have a reader, open one up.
+	rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset)
+	if err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): If the path is not found, we simply return a
+			// reader that returns io.EOF. However, we do not set fr.rc,
+			// allowing future attempts at getting a reader to possibly
+			// succeed if the file turns up later.
+			return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+		default:
+			return nil, err
+		}
+	}
+
+	fr.rc = rc
+
+	if fr.brd == nil {
+		fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
+	} else {
+		fr.brd.Reset(fr.rc)
+	}
+
+	return fr.brd, nil
+}
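A consequence of reader() worth making explicit before the reset helper below: the connection is opened lazily at the current offset, so a Seek only has to invalidate state, and the next Read transparently reopens in the right place. A usage sketch (ctx, driver, and size are placeholders; the tests further below do the same against the in-memory driver):

// Sketch: Seek discards the open stream; the next Read reopens it.
fr, _ := newFileReader(ctx, driver, "/patterned", size)
if _, err := fr.Seek(1024, os.SEEK_SET); err == nil { // triggers reset()
	p := make([]byte, 16)
	fr.Read(p) // ReadStream(ctx, path, 1024) happens lazily here
}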
+// reset discards the current reader, forcing the read method to open up a new
+// connection and rebuild the buffered reader. This should be called when the
+// offset and the reader will become out of sync, such as during a seek
+// operation.
+func (fr *fileReader) reset() {
+	if fr.err != nil {
+		return
+	}
+	if fr.rc != nil {
+		fr.rc.Close()
+		fr.rc = nil
+	}
+}
+
+func (fr *fileReader) closeWithErr(err error) error {
+	if fr.err != nil {
+		return fr.err
+	}
+
+	fr.err = err
+
+	// close and release reader chain
+	if fr.rc != nil {
+		fr.rc.Close()
+	}
+
+	fr.rc = nil
+	fr.brd = nil
+
+	return fr.err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go
new file mode 100644
index 00000000..774a864b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go
@@ -0,0 +1,199 @@
+package storage
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+	mrand "math/rand"
+	"os"
+	"testing"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+func TestSimpleRead(t *testing.T) {
+	ctx := context.Background()
+	content := make([]byte, 1<<20)
+	n, err := rand.Read(content)
+	if err != nil {
+		t.Fatalf("unexpected error building random data: %v", err)
+	}
+
+	if n != len(content) {
+		t.Fatalf("random read didn't fill buffer")
+	}
+
+	dgst, err := digest.FromReader(bytes.NewReader(content))
+	if err != nil {
+		t.Fatalf("unexpected error digesting random content: %v", err)
+	}
+
+	driver := inmemory.New()
+	path := "/random"
+
+	if err := driver.PutContent(ctx, path, content); err != nil {
+		t.Fatalf("error putting patterned content: %v", err)
+	}
+
+	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
+	if err != nil {
+		t.Fatalf("error allocating file reader: %v", err)
+	}
+
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		t.Fatalf("error getting digest verifier: %s", err)
+	}
+
+	io.Copy(verifier, fr)
+
+	if !verifier.Verified() {
+		t.Fatalf("unable to verify read data")
+	}
+}
+
+func TestFileReaderSeek(t *testing.T) {
+	driver := inmemory.New()
+	pattern := "01234567890ab" // prime length block
+	repetitions := 1024
+	path := "/patterned"
+	content := bytes.Repeat([]byte(pattern), repetitions)
+	ctx := context.Background()
+
+	if err := driver.PutContent(ctx, path, content); err != nil {
+		t.Fatalf("error putting patterned content: %v", err)
+	}
+
+	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
+
+	if err != nil {
+		t.Fatalf("unexpected error creating file reader: %v", err)
+	}
+
+	// Seek all over the place, in blocks of pattern size and make sure we get
+	// the right data.
+	for _, repetition := range mrand.Perm(repetitions - 1) {
+		targetOffset := int64(len(pattern) * repetition)
+		// Seek to a multiple of pattern size and read pattern size bytes
+		offset, err := fr.Seek(targetOffset, os.SEEK_SET)
+		if err != nil {
+			t.Fatalf("unexpected error seeking: %v", err)
+		}
+
+		if offset != targetOffset {
+			t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset)
+		}
+
+		p := make([]byte, len(pattern))
+
+		n, err := fr.Read(p)
+		if err != nil {
+			t.Fatalf("error reading pattern: %v", err)
+		}
+
+		if n != len(pattern) {
+			t.Fatalf("incorrect read length: %d != %d", n, len(pattern))
+		}
+
+		if string(p) != pattern {
+			t.Fatalf("incorrect read content: %q != %q", p, pattern)
+		}
+
+		// Check offset
+		current, err := fr.Seek(0, os.SEEK_CUR)
+		if err != nil {
+			t.Fatalf("error checking current offset: %v", err)
+		}
+
+		if current != targetOffset+int64(len(pattern)) {
+			t.Fatalf("unexpected offset after read: %v", err)
+		}
+	}
+
+	start, err := fr.Seek(0, os.SEEK_SET)
+	if err != nil {
+		t.Fatalf("error seeking to start: %v", err)
+	}
+
+	if start != 0 {
+		t.Fatalf("expected to seek to start: %v != 0", start)
+	}
+
+	end, err := fr.Seek(0, os.SEEK_END)
+	if err != nil {
+		t.Fatalf("error checking current offset: %v", err)
+	}
+
+	if end != int64(len(content)) {
+		t.Fatalf("expected to seek to end: %v != %v", end, len(content))
+	}
+
+	// 4. Seek before start, ensure error.
+
+	// seek before start
+	before, err := fr.Seek(-1, os.SEEK_SET)
+	if err == nil {
+		t.Fatalf("error expected, returned offset=%v", before)
+	}
+
+	// 5. Seek after end, ensure the offset is accepted and reads return io.EOF.
+	after, err := fr.Seek(1, os.SEEK_END)
+	if err != nil {
+		t.Fatalf("unexpected error, returned offset=%v", after)
+	}
+
+	p := make([]byte, 16)
+	n, err := fr.Read(p)
+
+	if n != 0 {
+		t.Fatalf("bytes reads %d != %d", n, 0)
+	}
+
+	if err != io.EOF {
+		t.Fatalf("expected io.EOF, got %v", err)
+	}
+}
+
+// TestFileReaderNonExistentFile ensures the reader behaves as expected with a
+// missing or zero-length remote file. While the file may not exist, the
+// reader should not error out on creation and should return 0-bytes from the
+// read method, with an io.EOF error.
+func TestFileReaderNonExistentFile(t *testing.T) {
+	driver := inmemory.New()
+	fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10)
+	if err != nil {
+		t.Fatalf("unexpected error initializing reader: %v", err)
+	}
+
+	var buf [1024]byte
+
+	n, err := fr.Read(buf[:])
+	if n != 0 {
+		t.Fatalf("non-zero byte read reported: %d != 0", n)
+	}
+
+	if err != io.EOF {
+		t.Fatalf("read on missing file should return io.EOF, got %v", err)
+	}
+}
+
+// TestFileReaderErrors covers the various error return types for different
+// conditions that can arise when reading a layer.
+func TestFileReaderErrors(t *testing.T) {
+	// TODO(stevvooe): We need to cover error return types, driven by the
+	// errors returned via the HTTP API. For now, here is an incomplete list:
+	//
+	// 1. Layer Not Found: returned when layer is not found or access is
+	//    denied.
+	// 2. Layer Unavailable: returned when link references are unresolved,
+	//    but layer is known to the registry.
+	// 3. Layer Invalid: This may be split into more errors, but should be
+	//    returned when name or tarsum does not reference a valid layer. We
+	//    may also need something to communicate layer verification errors
+	//    for the inline tarsum check.
+	// 4. Timeout: timeouts to backend.
+	// failure cases and how the storage driver propagates these errors
+	// up the stack.
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go
new file mode 100644
index 00000000..529fa673
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go
@@ -0,0 +1,180 @@
+package storage
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+const (
+	fileWriterBufferSize = 5 << 20
+)
+
+// fileWriter implements a remote file writer backed by a storage driver.
+type fileWriter struct {
+	driver storagedriver.StorageDriver
+
+	ctx context.Context
+
+	// identifying fields
+	path string
+
+	// mutable fields
+	size   int64 // size of the file, aka the current end
+	offset int64 // offset is the current write offset
+	err    error // terminal error, if set, writer is closed
+}
+
+type bufferedFileWriter struct {
+	fileWriter
+	bw *bufio.Writer
+}
+
+// fileWriterInterface describes the io-compliant interface that the
+// filewriter should implement.
+type fileWriterInterface interface {
+	io.WriteSeeker
+	io.ReaderFrom
+	io.Closer
+}
+
+var _ fileWriterInterface = &fileWriter{}
+
+// newFileWriter returns a prepared fileWriter for the driver and path. This
+// could be considered similar to an "open" call on a regular filesystem.
+func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
+	fw := fileWriter{
+		driver: driver,
+		path:   path,
+		ctx:    ctx,
+	}
+
+	if fi, err := driver.Stat(ctx, path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// ignore, offset is zero
+		default:
+			return nil, err
+		}
+	} else {
+		if fi.IsDir() {
+			return nil, fmt.Errorf("cannot write to a directory")
+		}
+
+		fw.size = fi.Size()
+	}
+
+	buffered := bufferedFileWriter{
+		fileWriter: fw,
+	}
+	buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize)
+
+	return &buffered, nil
+}
+
+// Write wraps the fileWriter.Write method to buffer small writes.
+func (bfw *bufferedFileWriter) Write(p []byte) (int, error) {
+	return bfw.bw.Write(p)
+}
+
+// Close wraps fileWriter.Close to ensure the buffer is flushed
+// before we close the writer.
+func (bfw *bufferedFileWriter) Close() (err error) {
+	if err = bfw.Flush(); err != nil {
+		return err
+	}
+	err = bfw.fileWriter.Close()
+	return err
+}
+
+// Seek wraps fileWriter.Seek to ensure the offset is handled
+// correctly with respect to pending data in the buffer.
+func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) {
+	if err := bfw.Flush(); err != nil {
+		return 0, err
+	}
+	return bfw.fileWriter.Seek(offset, whence)
+}
+
+// Flush wraps bufio.Writer.Flush to allow intermediate flushes
+// of the bufferedFileWriter.
+func (bfw *bufferedFileWriter) Flush() error {
+	return bfw.bw.Flush()
+}
+
+// Write writes the buffer p at the current write offset.
+func (fw *fileWriter) Write(p []byte) (n int, err error) {
+	nn, err := fw.ReadFrom(bytes.NewReader(p))
+	return int(nn), err
+}
+
+// ReadFrom reads reader r until io.EOF writing the contents at the current
+// offset.
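+//
+// A minimal sketch of the intended offset bookkeeping, assuming a fresh
+// writer on an empty path (names as in this file):
+//
+//	fw, _ := newFileWriter(ctx, driver, "/blob")           // offset == 0
+//	nn, _ := fw.ReadFrom(bytes.NewReader(make([]byte, 8))) // nn == 8, offset == 8
+//	end, _ := fw.Seek(0, os.SEEK_END)                      // end == 8, size grew with the write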
+func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { + if fw.err != nil { + return 0, fw.err + } + + nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) + + // We should forward the offset, whether or not there was an error. + // Basically, we keep the filewriter in sync with the reader's head. If an + // error is encountered, the whole thing should be retried but we proceed + // from an expected offset, even if the data didn't make it to the + // backend. + fw.offset += nn + + if fw.offset > fw.size { + fw.size = fw.offset + } + + return nn, err +} + +// Seek moves the write position do the requested offest based on the whence +// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET. +func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { + if fw.err != nil { + return 0, fw.err + } + + var err error + newOffset := fw.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = fw.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + // No problems, set the offset. + fw.offset = newOffset + } + + return fw.offset, err +} + +// Close closes the fileWriter for writing. +// Calling it once is valid and correct and it will +// return a nil error. Calling it subsequent times will +// detect that fw.err has been set and will return the error. +func (fw *fileWriter) Close() error { + if fw.err != nil { + return fw.err + } + + fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go new file mode 100644 index 00000000..858b0327 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go @@ -0,0 +1,262 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "io" + "os" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +// TestSimpleWrite takes the fileWriter through common write operations +// ensuring data integrity. 
+func TestSimpleWrite(t *testing.T) {
+	content := make([]byte, 1<<20)
+	n, err := rand.Read(content)
+	if err != nil {
+		t.Fatalf("unexpected error building random data: %v", err)
+	}
+
+	if n != len(content) {
+		t.Fatalf("random read didn't fill buffer")
+	}
+
+	dgst, err := digest.FromReader(bytes.NewReader(content))
+	if err != nil {
+		t.Fatalf("unexpected error digesting random content: %v", err)
+	}
+
+	driver := inmemory.New()
+	path := "/random"
+	ctx := context.Background()
+
+	fw, err := newFileWriter(ctx, driver, path)
+	if err != nil {
+		t.Fatalf("unexpected error creating fileWriter: %v", err)
+	}
+	defer fw.Close()
+
+	n, err = fw.Write(content)
+	if err != nil {
+		t.Fatalf("unexpected error writing content: %v", err)
+	}
+	fw.Flush()
+
+	if n != len(content) {
+		t.Fatalf("unexpected write length: %d != %d", n, len(content))
+	}
+
+	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
+	if err != nil {
+		t.Fatalf("unexpected error creating fileReader: %v", err)
+	}
+	defer fr.Close()
+
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		t.Fatalf("unexpected error getting digest verifier: %s", err)
+	}
+
+	io.Copy(verifier, fr)
+
+	if !verifier.Verified() {
+		t.Fatalf("unable to verify write data")
+	}
+
+	// Check the seek position is equal to the content length
+	end, err := fw.Seek(0, os.SEEK_END)
+	if err != nil {
+		t.Fatalf("unexpected error seeking: %v", err)
+	}
+
+	if end != int64(len(content)) {
+		t.Fatalf("write did not advance offset: %d != %d", end, len(content))
+	}
+
+	// Double the content
+	doubled := append(content, content...)
+	doubledgst, err := digest.FromReader(bytes.NewReader(doubled))
+	if err != nil {
+		t.Fatalf("unexpected error digesting doubled content: %v", err)
+	}
+
+	nn, err := fw.ReadFrom(bytes.NewReader(content))
+	if err != nil {
+		t.Fatalf("unexpected error doubling content: %v", err)
+	}
+
+	if nn != int64(len(content)) {
+		t.Fatalf("ReadFrom was short: %d != %d", nn, len(content))
+	}
+
+	fr, err = newFileReader(ctx, driver, path, int64(len(doubled)))
+	if err != nil {
+		t.Fatalf("unexpected error creating fileReader: %v", err)
+	}
+	defer fr.Close()
+
+	verifier, err = digest.NewDigestVerifier(doubledgst)
+	if err != nil {
+		t.Fatalf("unexpected error getting digest verifier: %s", err)
+	}
+
+	io.Copy(verifier, fr)
+
+	if !verifier.Verified() {
+		t.Fatalf("unable to verify write data")
+	}
+
+	// Check that ReadFrom updated the offset.
+	end, err = fw.Seek(0, os.SEEK_END)
+	if err != nil {
+		t.Fatalf("unexpected error seeking: %v", err)
+	}
+
+	if end != int64(len(doubled)) {
+		t.Fatalf("write did not advance offset: %d != %d", end, len(doubled))
+	}
+
+	// Now, we copy from one path to another, running the data through the
+	// fileReader to fileWriter, rather than the driver.Move command to ensure
+	// everything is working correctly.
+ fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) + if err != nil { + t.Fatalf("unexpected error creating fileReader: %v", err) + } + defer fr.Close() + + fw, err = newFileWriter(ctx, driver, "/copied") + if err != nil { + t.Fatalf("unexpected error creating fileWriter: %v", err) + } + defer fw.Close() + + nn, err = io.Copy(fw, fr) + if err != nil { + t.Fatalf("unexpected error copying data: %v", err) + } + + if nn != int64(len(doubled)) { + t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) + } + + fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) + if err != nil { + t.Fatalf("unexpected error creating fileReader: %v", err) + } + defer fr.Close() + + verifier, err = digest.NewDigestVerifier(doubledgst) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify write data") + } +} + +func TestBufferedFileWriter(t *testing.T) { + ctx := context.Background() + writer, err := newFileWriter(ctx, inmemory.New(), "/random") + + if err != nil { + t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) + } + + // write one byte and ensure the offset hasn't been incremented. + // offset will only get incremented when the buffer gets flushed + short := []byte{byte(1)} + + writer.Write(short) + + if writer.offset > 0 { + t.Fatalf("WriteStream called prematurely") + } + + // write enough data to cause the buffer to flush and confirm + // the offset has been incremented + long := make([]byte, fileWriterBufferSize) + _, err = rand.Read(long) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + for i := range long { + long[i] = byte(i) + } + writer.Write(long) + writer.Close() + if writer.offset != (fileWriterBufferSize + 1) { + t.Fatalf("WriteStream not called when buffer capacity reached") + } +} + +func BenchmarkFileWriter(b *testing.B) { + b.StopTimer() // not sure how long setup above will take + for i := 0; i < b.N; i++ { + // Start basic fileWriter initialization + fw := fileWriter{ + driver: inmemory.New(), + path: "/random", + } + ctx := context.Background() + if fi, err := fw.driver.Stat(ctx, fw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // ignore, offset is zero + default: + b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) + } + } else { + if fi.IsDir() { + b.Fatalf("Cannot write to a directory") + } + + fw.size = fi.Size() + } + + randomBytes := make([]byte, 1<<20) + _, err := rand.Read(randomBytes) + if err != nil { + b.Fatalf("unexpected error building random data: %v", err) + } + // End basic file writer initialization + + b.StartTimer() + for j := 0; j < 100; j++ { + fw.Write(randomBytes) + } + b.StopTimer() + } +} + +func BenchmarkBufferedFileWriter(b *testing.B) { + b.StopTimer() // not sure how long setup above will take + ctx := context.Background() + for i := 0; i < b.N; i++ { + bfw, err := newFileWriter(ctx, inmemory.New(), "/random") + + if err != nil { + b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) + } + + randomBytes := make([]byte, 1<<20) + _, err = rand.Read(randomBytes) + if err != nil { + b.Fatalf("unexpected error building random data: %v", err) + } + + b.StartTimer() + for j := 0; j < 100; j++ { + bfw.Write(randomBytes) + } + b.StopTimer() + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go 
b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go new file mode 100644 index 00000000..2ba62a95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -0,0 +1,301 @@ +package storage + +import ( + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. +type linkedBlobStore struct { + *blobStore + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool + + // linkPath allows one to control the repository blob link set to which + // the blob store dispatches. This is required because manifest and layer + // blobs have not yet been fully merged. At some point, this functionality + // should be removed an the blob links folder should be merged. + linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error) +} + +var _ distribution.BlobStore = &linkedBlobStore{} + +func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return lbs.blobAccessController.Stat(ctx, dgst) +} + +func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Get(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Open(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return err + } + + if canonical.MediaType != "" { + // Set the repository local content type. + w.Header().Set("Content-Type", canonical.MediaType) + } + + return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) +} + +func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst, err := digest.FromBytes(p) + if err != nil { + return distribution.Descriptor{}, err + } + // Place the data in the blob store first. + desc, err := lbs.blobStore.Put(ctx, mediaType, p) + if err != nil { + context.GetLogger(ctx).Errorf("error putting into main store: %v", err) + return distribution.Descriptor{}, err + } + + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype if incoming differs from what is + // returned by Put above. Note that we should allow updates for a given + // repository. + + return desc, lbs.linkBlob(ctx, desc) +} + +// Writer begins a blob write session, returning a handle. 
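+//
+// A typical session, as exercised by the tests in this package (sketch;
+// error handling elided, rs is the content and dgst its digest):
+//
+//	wr, _ := repo.Blobs(ctx).Create(ctx)
+//	io.Copy(wr, rs)
+//	desc, _ := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})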
+func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := lbs.blobStore.pm.path(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := lbs.pm.path(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +// newBlobUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { + fw, err := newFileWriter(ctx, lbs.driver, path) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + bufferedFileWriter: *fw, + resumableDigestEnabled: lbs.resumableDigestEnabled, + } + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. 
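+	// A blob may be linked under both its canonical digest and one or more
+	// aliases (e.g. a tarsum), and the canonical digest may appear again
+	// among the aliases, so track what has already been written.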
+ seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPath allows one to control the repository blob link set to which + // the blob store dispatches. This is required because manifest and layer + // blobs have not yet been fully merged. At some point, this functionality + // should be removed an the blob links folder should be merged. + linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error) +} + +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + target, err := lbs.blobStore.readlink(ctx, blobLinkPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): For backwards compatibility with data in "_layers", we + // need to hit layerLinkPath, as well. Or, somehow migrate to the new path + // layout. + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, tarsum to sha256. + context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return err + } + + return lbs.blobStore.driver.Delete(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. 
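+// Under the default root this resolves to, e.g.:
+//
+//	/docker/registry/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link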
+func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
+	return pm.path(manifestRevisionLinkPathSpec{name: name, revision: dgst})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go
new file mode 100644
index 00000000..c8c19d43
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go
@@ -0,0 +1,144 @@
+package storage
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/libtrust"
+)
+
+type manifestStore struct {
+	repository                 *repository
+	revisionStore              *revisionStore
+	tagStore                   *tagStore
+	ctx                        context.Context
+	skipDependencyVerification bool
+}
+
+var _ distribution.ManifestService = &manifestStore{}
+
+func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")
+
+	_, err := ms.revisionStore.blobStore.Stat(ms.ctx, dgst)
+	if err != nil {
+		if err == distribution.ErrBlobUnknown {
+			return false, nil
+		}
+
+		return false, err
+	}
+
+	return true, nil
+}
+
+func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Get")
+	return ms.revisionStore.get(ms.ctx, dgst)
+}
+
+// SkipLayerVerification allows a manifest to be Put before its
+// layers are on the filesystem
+func SkipLayerVerification(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifestStore); ok {
+		ms.skipDependencyVerification = true
+		return nil
+	}
+	return fmt.Errorf("skip layer verification only valid for manifeststore")
+}
+
+func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
+
+	if err := ms.verifyManifest(ms.ctx, manifest); err != nil {
+		return err
+	}
+
+	// Store the revision of the manifest
+	revision, err := ms.revisionStore.put(ms.ctx, manifest)
+	if err != nil {
+		return err
+	}
+
+	// Now, tag the manifest
+	return ms.tagStore.tag(manifest.Tag, revision.Digest)
+}
+
+// Delete removes the revision of the specified manifest.
+func (ms *manifestStore) Delete(dgst digest.Digest) error {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete")
+	return ms.revisionStore.delete(ms.ctx, dgst)
+}
+
+func (ms *manifestStore) Tags() ([]string, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Tags")
+	return ms.tagStore.tags()
+}
+
+func (ms *manifestStore) ExistsByTag(tag string) (bool, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).ExistsByTag")
+	return ms.tagStore.exists(tag)
+}
+
+func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
+	for _, option := range options {
+		err := option(ms)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag")
+	dgst, err := ms.tagStore.resolve(tag)
+	if err != nil {
+		return nil, err
+	}
+
+	return ms.revisionStore.get(ms.ctx, dgst)
+}
+
+// verifyManifest ensures that the manifest content is valid from the
+// perspective of the registry. It ensures that the signature is valid for the
+// enclosed payload. As a policy, the registry only tries to store valid
+// content, leaving trust policies of that content up to consumers.
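+//
+// Failures accumulate in a distribution.ErrManifestVerification, so a caller
+// can observe both signature problems and missing layers in a single pass
+// (sketch, using the error types handled below):
+//
+//	if err := ms.verifyManifest(ctx, sm); err != nil {
+//		for _, e := range err.(distribution.ErrManifestVerification) {
+//			// e may be distribution.ErrManifestUnverified,
+//			// distribution.ErrManifestBlobUnknown, etc.
+//		}
+//	}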
+func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *manifest.SignedManifest) error { + var errs distribution.ErrManifestVerification + if mnfst.Name != ms.repository.Name() { + errs = append(errs, fmt.Errorf("repository name does not match manifest name")) + } + + if _, err := manifest.Verify(mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !ms.skipDependencyVerification { + for _, fsLayer := range mnfst.FSLayers { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go new file mode 100644 index 00000000..a4ce9149 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -0,0 +1,364 @@ +package storage + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type manifestStoreTestEnv struct { + ctx context.Context + driver driver.StorageDriver + registry distribution.Namespace + repository distribution.Repository + name string + tag string +} + +func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + ctx := context.Background() + driver := inmemory.New() + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + + repo, err := registry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + return &manifestStoreTestEnv{ + ctx: ctx, + driver: driver, + registry: registry, + repository: repo, + name: name, + tag: tag, + } +} + +func TestManifestStorage(t *testing.T) { + env := newManifestStoreTestEnv(t, "foo/bar", "thetag") + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + exists, err := ms.ExistsByTag(env.tag) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %v", err) + } + + if exists { + t.Fatalf("manifest should not exist") + } + + if _, err := ms.GetByTag(env.tag); true { + switch err.(type) { + case distribution.ErrManifestUnknown: + break + default: + t.Fatalf("expected manifest unknown error: %#v", err) + } + } + + m := manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: env.name, + Tag: env.tag, + } + + // Build up some test layers and add them to the 
manifest, saving the
+	// readseekers for upload later.
+	testLayers := map[digest.Digest]io.ReadSeeker{}
+	for i := 0; i < 2; i++ {
+		rs, ds, err := testutil.CreateRandomTarFile()
+		if err != nil {
+			t.Fatalf("unexpected error generating test layer file")
+		}
+		dgst := digest.Digest(ds)
+
+		testLayers[dgst] = rs
+		m.FSLayers = append(m.FSLayers, manifest.FSLayer{
+			BlobSum: dgst,
+		})
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	sm, merr := manifest.Sign(&m, pk)
+	if merr != nil {
+		t.Fatalf("error signing manifest: %v", merr)
+	}
+
+	err = ms.Put(sm)
+	if err == nil {
+		t.Fatalf("expected errors putting manifest with full verification")
+	}
+
+	switch err := err.(type) {
+	case distribution.ErrManifestVerification:
+		if len(err) != 2 {
+			t.Fatalf("expected 2 verification errors: %#v", err)
+		}
+
+		for _, err := range err {
+			if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok {
+				t.Fatalf("unexpected error type: %v", err)
+			}
+		}
+	default:
+		t.Fatalf("unexpected error verifying manifest: %v", err)
+	}
+
+	// Now, upload the layers that were missing!
+	for dgst, rs := range testLayers {
+		wr, err := env.repository.Blobs(env.ctx).Create(env.ctx)
+		if err != nil {
+			t.Fatalf("unexpected error creating test upload: %v", err)
+		}
+
+		if _, err := io.Copy(wr, rs); err != nil {
+			t.Fatalf("unexpected error copying to upload: %v", err)
+		}
+
+		if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil {
+			t.Fatalf("unexpected error finishing upload: %v", err)
+		}
+	}
+
+	if err = ms.Put(sm); err != nil {
+		t.Fatalf("unexpected error putting manifest: %v", err)
+	}
+
+	exists, err = ms.ExistsByTag(env.tag)
+	if err != nil {
+		t.Fatalf("unexpected error checking manifest existence: %v", err)
+	}
+
+	if !exists {
+		t.Fatalf("manifest should exist")
+	}
+
+	fetchedManifest, err := ms.GetByTag(env.tag)
+
+	if err != nil {
+		t.Fatalf("unexpected error fetching manifest: %v", err)
+	}
+
+	if !reflect.DeepEqual(fetchedManifest, sm) {
+		t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm)
+	}
+
+	fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures")
+	if err != nil {
+		t.Fatalf("unexpected error parsing jws: %v", err)
+	}
+
+	payload, err := fetchedJWS.Payload()
+	if err != nil {
+		t.Fatalf("unexpected error extracting payload: %v", err)
+	}
+
+	// Now that we have a payload, take a moment to check that the manifest is
+	// returned by the payload digest.
+ dgst, err := digest.FromBytes(payload) + if err != nil { + t.Fatalf("error getting manifest digest: %v", err) + } + + exists, err = ms.Exists(dgst) + if err != nil { + t.Fatalf("error checking manifest existence by digest: %v", err) + } + + if !exists { + t.Fatalf("manifest %s should exist", dgst) + } + + fetchedByDigest, err := ms.Get(dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest by digest: %v", err) + } + + if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) + } + + sigs, err := fetchedJWS.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) + } + + // Grabs the tags and check that this tagged manifest is present + tags, err := ms.Tags() + if err != nil { + t.Fatalf("unexpected error fetching tags: %v", err) + } + + if len(tags) != 1 { + t.Fatalf("unexpected tags returned: %v", tags) + } + + if tags[0] != env.tag { + t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{env.tag}) + } + + // Now, push the same manifest with a different key + pk2, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm2, err := manifest.Sign(&m, pk2) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures") + if err != nil { + t.Fatalf("error parsing signature: %v", err) + } + + sigs2, err := jws2.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs2) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) + } + + if err = ms.Put(sm2); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + fetched, err := ms.GetByTag(env.tag) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + if _, err := manifest.Verify(fetched); err != nil { + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + // Assemble our payload and two signatures to get what we expect! 
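+	// Putting a second manifest signed over the same payload should have
+	// merged the signatures rather than replaced them, so the fetched
+	// manifest is expected to carry both keys' signatures.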
+	expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0])
+	if err != nil {
+		t.Fatalf("unexpected error merging jws: %v", err)
+	}
+
+	expectedSigs, err := expectedJWS.Signatures()
+	if err != nil {
+		t.Fatalf("unexpected error getting expected signatures: %v", err)
+	}
+
+	receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures")
+	if err != nil {
+		t.Fatalf("unexpected error parsing jws: %v", err)
+	}
+
+	receivedPayload, err := receivedJWS.Payload()
+	if err != nil {
+		t.Fatalf("unexpected error extracting received payload: %v", err)
+	}
+
+	if !bytes.Equal(receivedPayload, payload) {
+		t.Fatalf("payloads are not equal")
+	}
+
+	receivedSigs, err := receivedJWS.Signatures()
+	if err != nil {
+		t.Fatalf("error getting signatures: %v", err)
+	}
+
+	for i, sig := range receivedSigs {
+		if !bytes.Equal(sig, expectedSigs[i]) {
+			t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i]))
+		}
+	}
+
+	// Test deleting manifests
+	err = ms.Delete(dgst)
+	if err != nil {
+		t.Fatalf("unexpected error deleting manifest by digest: %v", err)
+	}
+
+	exists, err = ms.Exists(dgst)
+	if err != nil {
+		t.Fatalf("Error querying manifest existence")
+	}
+	if exists {
+		t.Errorf("Deleted manifest should not exist")
+	}
+
+	deletedManifest, err := ms.Get(dgst)
+	if err == nil {
+		t.Errorf("Unexpected success getting deleted manifest")
+	}
+	switch err.(type) {
+	case distribution.ErrManifestUnknownRevision:
+		break
+	default:
+		t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type())
+	}
+
+	if deletedManifest != nil {
+		t.Errorf("Deleted manifest get returned non-nil")
+	}
+
+	// Re-upload should restore manifest to a good state
+	err = ms.Put(sm)
+	if err != nil {
+		t.Errorf("Error re-uploading deleted manifest")
+	}
+
+	exists, err = ms.Exists(dgst)
+	if err != nil {
+		t.Fatalf("Error querying manifest existence")
+	}
+	if !exists {
+		t.Errorf("Restored manifest should exist")
+	}
+
+	deletedManifest, err = ms.Get(dgst)
+	if err != nil {
+		t.Errorf("Unexpected error getting restored manifest: %v", err)
+	}
+	if deletedManifest == nil {
+		t.Errorf("Restored manifest get returned nil")
+	}
+
+	r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false)
+	repo, err := r.Repository(ctx, env.name)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	ms, err = repo.Manifests(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = ms.Delete(dgst)
+	if err == nil {
+		t.Errorf("Unexpected success deleting while disabled")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go
new file mode 100644
index 00000000..35debddf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go
@@ -0,0 +1,509 @@
+package storage
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
+const storagePathVersion = "v2"
+
+// pathMapper maps paths based on "object names" and their ids. The "object
+// names" mapped by pathMapper are internal to the storage system.
+//
+// The path layout in the storage backend is roughly as follows:
+//
+//	<root>/v2
+//		-> repositories/
+//			-> <name>/
+//				-> _manifests/
+//					revisions
+//						-> <manifest digest path>
+//							-> link
+//							-> signatures
+//								<algorithm>/<digest>/link
+//					tags/<tag>
+//						-> current/link
+//						-> index
+//							-> <algorithm>/<hex digest>/link
+//				-> _layers/
+//					<layer links to blob store>
+//				-> _uploads/<id>
+//					data
+//					startedat
+//					hashstates/<algorithm>/<offset>
+//		-> blob/<algorithm>
+//			<split directory content addressable storage>
+//
+// The storage backend layout is broken up into a content-addressable blob
+// store and repositories. The content-addressable blob store holds most data
+// throughout the backend, keyed by algorithm and digests of the underlying
+// content. Access to the blob store is controlled through links from the
+// repository to blobstore.
+//
+// A repository is made up of layers, manifests and tags. The layers component
+// is just a directory of layers which are "linked" into a repository. A layer
+// can only be accessed through a qualified repository name if it is linked in
+// the repository. Uploads of layers are managed in the uploads directory,
+// which is keyed by upload id. When all data for an upload is received, the
+// data is moved into the blob store and the upload directory is deleted.
+// Abandoned uploads can be garbage collected by reading the startedat file
+// and removing uploads that have been active for longer than a certain time.
+//
+// The third component of the repository directory is the manifests store,
+// which is made up of a revision store and tag store. Manifests are stored in
+// the blob store and linked into the revision store. Signatures are separated
+// from the manifest payload data and linked into the blob store, as well.
+// While the registry can save all revisions of a manifest, no relationship is
+// implied as to the ordering of changes to a manifest. The tag store provides
+// support for name, tag lookups of manifests, using "current/link" under a
+// named tag directory. An index is maintained to support deletions of all
+// revisions of a given manifest tag.
+//
+// We cover the path formats implemented by this path mapper below.
+//
+// Manifests:
+//
+//	manifestRevisionPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
+//	manifestRevisionLinkPathSpec:  <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
+//	manifestSignaturesPathSpec:    <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/
+//	manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/<algorithm>/<hex digest>/link
+//
+// Tags:
+//
+//	manifestTagsPathSpec:              <root>/v2/repositories/<name>/_manifests/tags/
+//	manifestTagPathSpec:               <root>/v2/repositories/<name>/_manifests/tags/<tag>/
+//	manifestTagCurrentPathSpec:        <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
+//	manifestTagIndexPathSpec:          <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
+//	manifestTagIndexEntryPathSpec:     <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
+//	manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
+//
+// Blobs:
+//
+//	layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
+//
+// Uploads:
+//
+//	uploadDataPathSpec:      <root>/v2/repositories/<name>/_uploads/<id>/data
+//	uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
+//	uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
+//
+// Blob Store:
+//
+//	blobPathSpec:          <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
+//	blobDataPathSpec:      <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
+//	blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
+//
+// For more information on the semantic meaning of each path and their
+// contents, please see the path spec documentation.
+type pathMapper struct {
+	root    string
+	version string // should be a constant?
+} + +var defaultPathMapper = &pathMapper{ + root: "/docker/registry/", + version: storagePathVersion, +} + +// path returns the path identified by spec. +func (pm *pathMapper) path(spec pathSpec) (string, error) { + + // Switch on the path object type and return the appropriate path. At + // first glance, one may wonder why we don't use an interface to + // accomplish this. By keep the formatting separate from the pathSpec, we + // keep separate the path generation componentized. These specs could be + // passed to a completely different mapper implementation and generate a + // different set of paths. + // + // For example, imagine migrating from one backend to the other: one could + // build a filesystem walker that converts a string path in one version, + // to an intermediate path object, than can be consumed and mapped by the + // other version. + + rootPrefix := []string{pm.root, pm.version} + repoPrefix := append(rootPrefix, "repositories") + + switch v := spec.(type) { + + case manifestRevisionPathSpec: + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + case manifestRevisionLinkPathSpec: + root, err := pm.path(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestSignaturesPathSpec: + root, err := pm.path(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "signatures"), nil + case manifestSignatureLinkPathSpec: + root, err := pm.path(manifestSignaturesPathSpec{ + name: v.name, + revision: v.revision, + }) + if err != nil { + return "", err + } + + signatureComponents, err := digestPathComponents(v.signature, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil + case manifestTagsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + case manifestTagPathSpec: + root, err := pm.path(manifestTagsPathSpec{ + name: v.name, + }) + if err != nil { + return "", err + } + + return path.Join(root, v.tag), nil + case manifestTagCurrentPathSpec: + root, err := pm.path(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + if err != nil { + return "", err + } + + return path.Join(root, "current", "link"), nil + case manifestTagIndexPathSpec: + root, err := pm.path(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + if err != nil { + return "", err + } + + return path.Join(root, "index"), nil + case manifestTagIndexEntryLinkPathSpec: + root, err := pm.path(manifestTagIndexEntryPathSpec{ + name: v.name, + tag: v.tag, + revision: v.revision, + }) + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagIndexEntryPathSpec: + root, err := pm.path(manifestTagIndexPathSpec{ + name: v.name, + tag: v.tag, + }) + if err != nil { + return "", err + } + + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(components...)), nil + case layerLinkPathSpec: + components, err := digestPathComponents(v.digest, false) + if err != nil { + return "", err + } + + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". 
+ // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. + + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil + case uploadStartedAtPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. + } + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil + default: + // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// manifestRevisionPathSpec describes the components of the directory path for +// a manifest revision. +type manifestRevisionPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionPathSpec) pathSpec() {} + +// manifestRevisionLinkPathSpec describes the path components required to look +// up the data link for a revision of a manifest. If this file is not present, +// the manifest blob is not available in the given repo. The contents of this +// file should just be the digest. +type manifestRevisionLinkPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionLinkPathSpec) pathSpec() {} + +// manifestSignaturesPathSpec decribes the path components for the directory +// containing all the signatures for the target blob. Entries are named with +// the underlying key id. +type manifestSignaturesPathSpec struct { + name string + revision digest.Digest +} + +func (manifestSignaturesPathSpec) pathSpec() {} + +// manifestSignatureLinkPathSpec decribes the path components used to look up +// a signature file by the hash of its blob. +type manifestSignatureLinkPathSpec struct { + name string + revision digest.Digest + signature digest.Digest +} + +func (manifestSignatureLinkPathSpec) pathSpec() {} + +// manifestTagsPathSpec describes the path elements required to point to the +// manifest tags directory. +type manifestTagsPathSpec struct { + name string +} + +func (manifestTagsPathSpec) pathSpec() {} + +// manifestTagPathSpec describes the path elements required to point to the +// manifest tag links files under a repository. These contain a blob id that +// can be used to look up the data and signatures. +type manifestTagPathSpec struct { + name string + tag string +} + +func (manifestTagPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the current revision for a +// given tag. 
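+//
+// For example, name "foo/bar" and tag "latest" map, under the default root,
+// to:
+//
+//	/docker/registry/v2/repositories/foo/bar/_manifests/tags/latest/current/link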
+type manifestTagCurrentPathSpec struct { + name string + tag string +} + +func (manifestTagCurrentPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the index of revisions +// with the given tag. +type manifestTagIndexPathSpec struct { + name string + tag string +} + +func (manifestTagIndexPathSpec) pathSpec() {} + +// manifestTagIndexEntryPathSpec contains the entries of the index by revision. +type manifestTagIndexEntryPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryPathSpec) pathSpec() {} + +// manifestTagIndexEntryLinkPathSpec describes the link to a revisions of a +// manifest with given tag within the index. +type manifestTagIndexEntryLinkPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} + +// blobLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference +// into the blob store. The format of the contents is as follows: +// +// : +// +// The following example of the file contents is more illustrative: +// +// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 +// +// This indicates that there is a blob with the id/digest, calculated via +// sha256 that can be fetched from the blob store. +type layerLinkPathSpec struct { + name string + digest digest.Digest +} + +func (layerLinkPathSpec) pathSpec() {} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths +// should be "safe" before getting this far due to strict digest requirements +// but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// // blobPathSpec contains the path for the registry global blob store. +// type blobPathSpec struct { +// digest digest.Digest +// } + +// func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the registry global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + name string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters for the file that stores the +// start time of an uploads. If it is missing, the upload is considered +// unknown. Admittedly, the presence of this file is an ugly hack to make sure +// we have a way to cleanup old or stalled uploads that doesn't rely on driver +// FileInfo behavior. If we come up with a more clever way to do this, we +// should remove this file immediately and rely on the startetAt field from +// the client to enforce time out policies. +type uploadStartedAtPathSpec struct { + name string + id string +} + +func (uploadStartedAtPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. 
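+//
+// For example, name "foo/bar", id "abc", alg "sha256" and offset 1024 map,
+// under the default root, to:
+//
+//	/docker/registry/v2/repositories/foo/bar/_uploads/abc/hashstates/sha256/1024
+//
+// With list set, the trailing offset is omitted so all saved states for the
+// upload can be enumerated.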
+type uploadHashStatePathSpec struct { + name string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec returns the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + +// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// / +// +// Most importantly, for tarsum, the layout looks like this: +// +// tarsum/// +// +// If multilevel is true, the first two bytes of the digest will separate +// groups of digest folder. It will be as follows: +// +// // +// +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Hex() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + if tsi, err := digest.ParseTarSum(dgst.String()); err == nil { + // We have a tarsum! + version := tsi.Version + if version == "" { + version = "v0" + } + + prefix = []string{ + "tarsum", + version, + tsi.Algorithm, + } + } + + return append(prefix, suffix...), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go new file mode 100644 index 00000000..3d17b377 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go @@ -0,0 +1,146 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestPathMapper(t *testing.T) { + pm := &pathMapper{ + root: "/pathmapper-test", + } + + for _, testcase := range []struct { + spec pathSpec + expected string + err error + }{ + { + spec: manifestRevisionPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", + }, + { + spec: manifestRevisionLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", + }, + { + spec: manifestSignatureLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + signature: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", + }, + { + spec: manifestSignaturesPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", + }, + { + spec: manifestTagsPathSpec{ + name: "foo/bar", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags", + }, + { + spec: manifestTagPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag", + }, + { + spec: manifestTagCurrentPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/current/link", + }, + { + spec: manifestTagIndexPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index", + }, + { + spec: 
manifestTagIndexEntryPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", + }, + { + spec: manifestTagIndexEntryLinkPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", + }, + { + spec: layerLinkPathSpec{ + name: "foo/bar", + digest: "tarsum.v1+test:abcdef", + }, + expected: "/pathmapper-test/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", + }, + { + spec: blobDataPathSpec{ + digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), + }, + expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", + }, + { + spec: blobDataPathSpec{ + digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), + }, + expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", + }, + + { + spec: uploadDataPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + }, + { + spec: uploadStartedAtPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + }, + } { + p, err := pm.path(testcase.spec) + if err != nil { + t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) + } + + if p != testcase.expected { + t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) + } + } + + // Add a few test cases to ensure we cover some errors + + // Specify a path that requires a revision and get a digest validation error. + badpath, err := pm.path(manifestSignaturesPathSpec{ + name: "foo/bar", + }) + if err == nil { + t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go new file mode 100644 index 00000000..c66f8881 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -0,0 +1,138 @@ +package storage + +import ( + "path" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// uploadData stored the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. 
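+// (When actuallyDelete is false the pass is a dry run: eligible upload
+// directories are reported in the returned list but nothing is removed.)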
The list of files deleted and errors +// encountered are returned +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(ctx, driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(ctx, uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedAt +// file, so gather files by UUID with a date from startedAt. +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uUIDFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uUIDFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uUIDFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedAtFile +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go new file mode 
100644
index 00000000..18c98af8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go
@@ -0,0 +1,168 @@
+package storage
+
+import (
+	"path"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+	"github.com/docker/distribution/uuid"
+)
+
+var pm = defaultPathMapper
+
+func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) {
+	d := inmemory.New()
+	ctx := context.Background()
+	for i := 0; i < numUploads; i++ {
+		addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt)
+	}
+	return d, ctx
+}
+
+func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) {
+	dataPath, err := pm.path(uploadDataPathSpec{name: repo, id: uploadID})
+	if err != nil {
+		t.Fatalf("Unable to resolve path")
+	}
+	if err := d.PutContent(ctx, dataPath, []byte("")); err != nil {
+		t.Fatalf("Unable to write data file")
+	}
+
+	startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, id: uploadID})
+	if err != nil {
+		t.Fatalf("Unable to resolve path")
+	}
+
+	if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
+		t.Fatalf("Unable to write startedAt file")
+	}
+
+}
+
+func TestPurgeGather(t *testing.T) {
+	uploadCount := 5
+	fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now())
+	uploadData, errs := getOutstandingUploads(ctx, fs)
+	if len(errs) != 0 {
+		t.Errorf("Unexpected errors: %q", errs)
+	}
+	if len(uploadData) != uploadCount {
+		t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData))
+	}
+}
+
+func TestPurgeNone(t *testing.T) {
+	fs, ctx := testUploadFS(t, 10, "test-repo", time.Now())
+	oneHourAgo := time.Now().Add(-1 * time.Hour)
+	deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true)
+	if len(errs) != 0 {
+		t.Error("Unexpected errors", errs)
+	}
+	if len(deleted) != 0 {
+		t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo)
+	}
+}
+
+func TestPurgeAll(t *testing.T) {
+	uploadCount := 10
+	oneHourAgo := time.Now().Add(-1 * time.Hour)
+	fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo)
+
+	// Ensure > 1 repos are purged
+	addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo)
+	uploadCount++
+
+	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
+	if len(errs) != 0 {
+		t.Error("Unexpected errors:", errs)
+	}
+	fileCount := uploadCount
+	if len(deleted) != fileCount {
+		t.Errorf("Unexpectedly deleted file count %d != %d",
+			len(deleted), fileCount)
+	}
+}
+
+func TestPurgeSome(t *testing.T) {
+	oldUploadCount := 5
+	oneHourAgo := time.Now().Add(-1 * time.Hour)
+	fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo)
+
+	newUploadCount := 4
+
+	for i := 0; i < newUploadCount; i++ {
+		addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour))
+	}
+
+	deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
+	if len(errs) != 0 {
+		t.Error("Unexpected errors:", errs)
+	}
+	if len(deleted) != oldUploadCount {
+		t.Errorf("Unexpectedly deleted file count %d != %d",
+			len(deleted), oldUploadCount)
+	}
+}
+
+func TestPurgeOnlyUploads(t *testing.T) {
+	oldUploadCount := 5
+	oneHourAgo := time.Now().Add(-1 * time.Hour)
+	fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo)
+
+	
// Create a directory tree outside _uploads and ensure + // these files aren't deleted. + dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) + if err != nil { + t.Fatalf(err.Error()) + } + nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) + if strings.Index(nonUploadPath, "_upload") != -1 { + t.Fatalf("Non-upload path not created correctly") + } + + nonUploadFile := path.Join(nonUploadPath, "file") + if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + for _, file := range deleted { + if strings.Index(file, "_upload") == -1 { + t.Errorf("Non-upload file deleted") + } + } +} + +func TestPurgeMissingStartedAt(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) + + err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + + if file == "startedat" { + if err := fs.Delete(ctx, filePath); err != nil { + t.Fatalf("Unable to delete startedat file: %s", filePath) + } + } + return nil + }) + if err != nil { + t.Fatalf("Unexpected error during Walk: %s ", err.Error()) + } + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) > 0 { + t.Errorf("Unexpected errors") + } + if len(deleted) > 0 { + t.Errorf("Files unexpectedly deleted: %s", deleted) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go new file mode 100644 index 00000000..c5058b80 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go @@ -0,0 +1,185 @@ +package storage + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + blobStore *blobStore + blobServer distribution.BlobServer + statter distribution.BlobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool +} + +// NewRegistryWithDriver creates a new registry instance from the provided +// driver. The resulting registry may be shared by multiple goroutines but is +// cheap to allocate. If redirect is true, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +// +// TODO(stevvooe): This function signature is getting very out of hand. Move to +// functional options for instance configuration. +func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool, isCache bool) distribution.Namespace { + // create global statter, with cache. 
+	var statter distribution.BlobDescriptorService = &blobStatter{
+		driver: driver,
+		pm:     defaultPathMapper,
+	}
+
+	if blobDescriptorCacheProvider != nil {
+		statter = cache.NewCachedBlobStatter(blobDescriptorCacheProvider, statter)
+	}
+
+	bs := &blobStore{
+		driver:  driver,
+		pm:      defaultPathMapper,
+		statter: statter,
+	}
+
+	return &registry{
+		blobStore: bs,
+		blobServer: &blobServer{
+			driver:   driver,
+			statter:  statter,
+			pathFn:   bs.path,
+			redirect: redirect,
+		},
+		blobDescriptorCacheProvider: blobDescriptorCacheProvider,
+		deleteEnabled:               deleteEnabled,
+		resumableDigestEnabled:      !isCache,
+	}
+}
+
+// Scope returns the namespace scope for a registry. The registry
+// will only serve repositories contained within this scope.
+func (reg *registry) Scope() distribution.Scope {
+	return distribution.GlobalScope
+}
+
+// Repository returns an instance of the repository tied to the registry.
+// Instances should not be shared between goroutines but are cheap to
+// allocate. In general, they should be request scoped.
+func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) {
+	if err := v2.ValidateRepositoryName(name); err != nil {
+		return nil, distribution.ErrRepositoryNameInvalid{
+			Name:   name,
+			Reason: err,
+		}
+	}
+
+	var descriptorCache distribution.BlobDescriptorService
+	if reg.blobDescriptorCacheProvider != nil {
+		var err error
+		descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(name)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &repository{
+		ctx:             ctx,
+		registry:        reg,
+		name:            name,
+		descriptorCache: descriptorCache,
+	}, nil
+}
+
+// repository provides name-scoped access to various services.
+type repository struct {
+	*registry
+	ctx             context.Context
+	name            string
+	descriptorCache distribution.BlobDescriptorService
+}
+
+// Name returns the name of the repository.
+func (repo *repository) Name() string {
+	return repo.name
+}
+
+// Manifests returns an instance of ManifestService. Instantiation is cheap and
+// may be context sensitive in the future. The instance should be used similar
+// to a request local.
+func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	ms := &manifestStore{
+		ctx:        ctx,
+		repository: repo,
+		revisionStore: &revisionStore{
+			ctx:        ctx,
+			repository: repo,
+			blobStore: &linkedBlobStore{
+				ctx:           ctx,
+				blobStore:     repo.blobStore,
+				repository:    repo,
+				deleteEnabled: repo.registry.deleteEnabled,
+				blobAccessController: &linkedBlobStatter{
+					blobStore:  repo.blobStore,
+					repository: repo,
+					linkPath:   manifestRevisionLinkPath,
+				},
+
+				// TODO(stevvooe): linkPath limits this blob store to only
+				// manifests. This instance cannot be used for blob checks.
+				linkPath: manifestRevisionLinkPath,
+			},
+		},
+		tagStore: &tagStore{
+			ctx:        ctx,
+			repository: repo,
+			blobStore:  repo.registry.blobStore,
+		},
+	}
+
+	// Apply options
+	for _, option := range options {
+		err := option(ms)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return ms, nil
+}
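For orientation, a minimal sketch of how these pieces compose, assuming the vendored import paths in this diff resolve; the variable names and the repository name are illustrative only, not part of the patch.

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()

	// No descriptor cache, deletes disabled, no redirects, not a cache.
	ns := storage.NewRegistryWithDriver(ctx, inmemory.New(), nil, false, false, false)

	repo, err := ns.Repository(ctx, "library/hello")
	if err != nil {
		panic(err)
	}

	// Blobs hands back a request-scoped blob service for this repository.
	blobs := repo.Blobs(ctx)
	fmt.Println(repo.Name(), blobs != nil)
}

+
+// Blobs returns an instance of the BlobStore. Instantiation is cheap and
+// may be context sensitive in the future. The instance should be used similar
+// to a request local.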
+func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPath: blobLinkPath, + } + + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + + return &linkedBlobStore{ + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPath: blobLinkPath, + deleteEnabled: repo.registry.deleteEnabled, + } +} + +func (repo *repository) Signatures() distribution.SignatureService { + return &signatureStore{ + repository: repo, + blobStore: repo.blobStore, + ctx: repo.ctx, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go new file mode 100644 index 00000000..9dea78e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go @@ -0,0 +1,111 @@ +package storage + +import ( + "encoding/json" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +// revisionStore supports storing and managing manifest revisions. +type revisionStore struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +// get retrieves the manifest, keyed by revision digest. +func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*manifest.SignedManifest, error) { + // Ensure that this revision is available in this repository. + _, err := rs.blobStore.Stat(ctx, revision) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: rs.repository.Name(), + Revision: revision, + } + } + + return nil, err + } + + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. + + content, err := rs.blobStore.Get(ctx, revision) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: rs.repository.Name(), + Revision: revision, + } + } + + return nil, err + } + + // Fetch the signatures for the manifest + signatures, err := rs.repository.Signatures().Get(revision) + if err != nil { + return nil, err + } + + jsig, err := libtrust.NewJSONSignature(content, signatures...) + if err != nil { + return nil, err + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm manifest.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + + return &sm, nil +} + +// put stores the manifest in the repository, if not already present. Any +// updated signatures will be stored, as well. +func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) (distribution.Descriptor, error) { + // Resolve the payload in the manifest. + payload, err := sm.Payload() + if err != nil { + return distribution.Descriptor{}, err + } + + // Digest and store the manifest payload in the blob store. 
+	revision, err := rs.blobStore.Put(ctx, manifest.ManifestMediaType, payload)
+	if err != nil {
+		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
+		return distribution.Descriptor{}, err
+	}
+
+	// Link the revision into the repository.
+	if err := rs.blobStore.linkBlob(ctx, revision); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	// Grab each JSON signature and store them.
+	signatures, err := sm.Signatures()
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := rs.repository.Signatures().Put(revision.Digest, signatures...); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	return revision, nil
+}
+
+func (rs *revisionStore) delete(ctx context.Context, revision digest.Digest) error {
+	return rs.blobStore.Delete(ctx, revision)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go
new file mode 100644
index 00000000..78fd2e6c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go
@@ -0,0 +1,141 @@
+package storage
+
+import (
+	"path"
+	"sync"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+type signatureStore struct {
+	repository *repository
+	blobStore  *blobStore
+	ctx        context.Context
+}
+
+func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobStore) *signatureStore {
+	return &signatureStore{
+		ctx:        ctx,
+		repository: repo,
+		blobStore:  blobStore,
+	}
+}
+
+var _ distribution.SignatureService = &signatureStore{}
+
+func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
+	signaturesPath, err := s.blobStore.pm.path(manifestSignaturesPathSpec{
+		name:     s.repository.Name(),
+		revision: dgst,
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Need to append signature digest algorithm to path to get all items.
+	// Perhaps, this should be in the pathMapper but it feels awkward. This
+	// can be eliminated by implementing listAll on drivers.
+	signaturesPath = path.Join(signaturesPath, "sha256")
+
+	signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var wg sync.WaitGroup
+	type result struct {
+		index     int
+		signature []byte
+		err       error
+	}
+	ch := make(chan result)
+
+	bs := s.linkedBlobStore(s.ctx, dgst)
+	for i, sigPath := range signaturePaths {
+		sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath))
+		if err != nil {
+			context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath)
+			continue
+		}
+
+		wg.Add(1)
+		go func(idx int, sigdgst digest.Digest) {
+			defer wg.Done()
+			context.GetLogger(s.ctx).
+				Debugf("fetching signature %q", sigdgst)
+
+			r := result{index: idx}
+
+			if p, err := bs.Get(s.ctx, sigdgst); err != nil {
+				context.GetLogger(s.ctx).
+					Errorf("error fetching signature %q: %v", sigdgst, err)
+				r.err = err
+			} else {
+				r.signature = p
+			}
+
+			ch <- r
+		}(i, sigdgst)
+	}
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+
+	// aggregate the results
+	signatures := make([][]byte, len(signaturePaths))
loop:
+	for {
+		select {
+		case result := <-ch:
+			signatures[result.index] = result.signature
+			if result.err != nil && err == nil {
+				// only set the first one.
+				err = result.err
+			}
+		case <-done:
+			break loop
+		}
+	}
+
+	return signatures, err
+}
+
+func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
+	bs := s.linkedBlobStore(s.ctx, dgst)
+	for _, signature := range signatures {
+		if _, err := bs.Put(s.ctx, "application/json", signature); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// linkedBlobStore returns the linkedBlobStore of the signatures for the
+// manifest with the given digest. Effectively, each signature link path
+// layout is a unique linked blob store.
+func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore {
+	linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
+		return pm.path(manifestSignatureLinkPathSpec{
+			name:      name,
+			revision:  revision,
+			signature: dgst,
+		})
+	}
+
+	return &linkedBlobStore{
+		ctx:        ctx,
+		repository: s.repository,
+		blobStore:  s.blobStore,
+		blobAccessController: &linkedBlobStatter{
+			blobStore:  s.blobStore,
+			repository: s.repository,
+			linkPath:   linkpath,
+		},
+		linkPath: linkpath,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go
new file mode 100644
index 00000000..a74d9b09
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go
@@ -0,0 +1,143 @@
+package storage
+
+import (
+	"path"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// tagStore provides methods to manage manifest tags in a backend storage driver.
+type tagStore struct {
+	repository *repository
+	blobStore  *blobStore
+	ctx        context.Context
+}
+
+// tags lists the manifest tags for the specified repository.
+func (ts *tagStore) tags() ([]string, error) {
+	p, err := ts.blobStore.pm.path(manifestTagPathSpec{
+		name: ts.repository.Name(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var tags []string
+	entries, err := ts.blobStore.driver.List(ts.ctx, p)
+	if err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()}
+		default:
+			return nil, err
+		}
+	}
+
+	for _, entry := range entries {
+		_, filename := path.Split(entry)
+
+		tags = append(tags, filename)
+	}
+
+	return tags, nil
+}
+
+// exists returns true if the specified manifest tag exists in the repository.
+func (ts *tagStore) exists(tag string) (bool, error) {
+	tagPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+		name: ts.repository.Name(),
+		tag:  tag,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	exists, err := exists(ts.ctx, ts.blobStore.driver, tagPath)
+	if err != nil {
+		return false, err
+	}
+
+	return exists, nil
+}
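The tag method below only manipulates link files; the layout it writes is the one asserted in paths_test.go earlier in this diff. The following is a simplified, self-contained sketch of that layout (illustrative only; the real mapping lives in the unexported pathMapper).

package main

import (
	"fmt"
	"path"
)

// tagCurrentLinkPath mirrors the manifestTagCurrentPathSpec layout from
// paths_test.go; it is a simplified stand-in for the unexported pathMapper.
func tagCurrentLinkPath(root, repo, tag string) string {
	return path.Join(root, "repositories", repo, "_manifests", "tags", tag, "current", "link")
}

func main() {
	fmt.Println(tagCurrentLinkPath("/registry", "foo/bar", "latest"))
	// Output: /registry/repositories/foo/bar/_manifests/tags/latest/current/link
}

+
+// tag tags the digest with the given tag, updating the store to point at
+// the current tag. The digest must point to a manifest.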
+func (ts *tagStore) tag(tag string, revision digest.Digest) error {
+	currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+		name: ts.repository.Name(),
+		tag:  tag,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	nbs := ts.linkedBlobStore(ts.ctx, tag)
+	// Link into the index
+	if err := nbs.linkBlob(ts.ctx, distribution.Descriptor{Digest: revision}); err != nil {
+		return err
+	}
+
+	// Overwrite the current link
+	return ts.blobStore.link(ts.ctx, currentPath, revision)
+}
+
+// resolve the current revision for name and tag.
+func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
+	currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+		name: ts.repository.Name(),
+		tag:  tag,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	revision, err := ts.blobStore.readlink(ts.ctx, currentPath)
+	if err != nil {
+		switch err.(type) {
+		case storagedriver.PathNotFoundError:
+			return "", distribution.ErrManifestUnknown{Name: ts.repository.Name(), Tag: tag}
+		}
+
+		return "", err
+	}
+
+	return revision, nil
+}
+
+// delete removes the tag from repository, including the history of all
+// revisions that have the specified tag.
+func (ts *tagStore) delete(tag string) error {
+	tagPath, err := ts.blobStore.pm.path(manifestTagPathSpec{
+		name: ts.repository.Name(),
+		tag:  tag,
+	})
+	if err != nil {
+		return err
+	}
+
+	return ts.blobStore.driver.Delete(ts.ctx, tagPath)
+}
+
+// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
+// to index manifest blobs by tag name. While the tag store doesn't map
+// precisely to the linked blob store, using this ensures the links are
+// managed via the same code path.
+func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
+	return &linkedBlobStore{
+		blobStore:  ts.blobStore,
+		repository: ts.repository,
+		ctx:        ctx,
+		linkPath: func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
+			return pm.path(manifestTagIndexEntryLinkPathSpec{
+				name:     name,
+				tag:      tag,
+				revision: dgst,
+			})
+		},
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go
new file mode 100644
index 00000000..773d7ba0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go
@@ -0,0 +1,21 @@
+package storage
+
+import (
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+)
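Because the helper below is unexported, a caller outside this package would reimplement the same Stat-and-classify pattern. Here is a self-contained sketch under that assumption, with pathExists as a hypothetical stand-in.

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

// pathExists mirrors the unexported exists helper below: a missing path
// is reported as (false, nil) rather than as an error.
func pathExists(ctx context.Context, d driver.StorageDriver, p string) (bool, error) {
	if _, err := d.Stat(ctx, p); err != nil {
		if _, ok := err.(driver.PathNotFoundError); ok {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func main() {
	ctx := context.Background()
	ok, err := pathExists(ctx, inmemory.New(), "/no/such/path")
	fmt.Println(ok, err) // false <nil>
}

+
+// exists provides a utility method to test whether or not a path exists in
+// the given driver.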
+func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
+	if _, err := drv.Stat(ctx, path); err != nil {
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			return false, nil
+		default:
+			return false, err
+		}
+	}
+
+	return true, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go
new file mode 100644
index 00000000..46b8096b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go
@@ -0,0 +1,67 @@
+package storage
+
+import (
+	"path"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// vacuum contains functions for cleaning up repositories and blobs
+// These functions will only reliably work on strongly consistent
+// storage systems.
+// https://en.wikipedia.org/wiki/Consistency_model
+
+// NewVacuum creates a new Vacuum
+func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
+	return Vacuum{
+		ctx:    ctx,
+		driver: driver,
+		pm:     defaultPathMapper,
+	}
+}
+
+// Vacuum removes content from the filesystem
+type Vacuum struct {
+	pm     *pathMapper
+	driver driver.StorageDriver
+	ctx    context.Context
+}
+
+// RemoveBlob removes a blob from the filesystem
+func (v Vacuum) RemoveBlob(dgst string) error {
+	d, err := digest.ParseDigest(dgst)
+	if err != nil {
+		return err
+	}
+
+	blobPath, err := v.pm.path(blobDataPathSpec{digest: d})
+	if err != nil {
+		return err
+	}
+	context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)
+	err = v.driver.Delete(v.ctx, blobPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RemoveRepository removes a repository directory from the
+// filesystem
+func (v Vacuum) RemoveRepository(repoName string) error {
+	rootForRepository, err := v.pm.path(repositoriesRootPathSpec{})
+	if err != nil {
+		return err
+	}
+	repoDir := path.Join(rootForRepository, repoName)
+	context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir)
+	err = v.driver.Delete(v.ctx, repoDir)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go
new file mode 100644
index 00000000..8290f167
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go
@@ -0,0 +1,51 @@
+package storage
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution/context"
+	storageDriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// ErrSkipDir is used as a return value from a WalkFn to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var ErrSkipDir = errors.New("skip this directory")
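A short, self-contained sketch of the traversal contract defined below, assuming the vendored import paths resolve: returning ErrSkipDir from the callback prunes a subtree without aborting the walk.

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	d := inmemory.New()
	_ = d.PutContent(ctx, "/keep/file", []byte("x"))
	_ = d.PutContent(ctx, "/skip/file", []byte("x"))

	// Prints /keep and /keep/file; /skip and its contents are pruned.
	_ = storage.Walk(ctx, d, "/", func(fi driver.FileInfo) error {
		if fi.IsDir() && fi.Path() == "/skip" {
			return storage.ErrSkipDir // prune this subtree, keep walking
		}
		fmt.Println(fi.Path())
		return nil
	})
}

+
+// WalkFn is called once per file by Walk.
+// If the returned error is ErrSkipDir and fileInfo refers
+// to a directory, the directory will not be entered and Walk
+// will continue the traversal. Otherwise Walk will return the error.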
+type WalkFn func(fileInfo storageDriver.FileInfo) error
+
+// Walk traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file
+func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error {
+	children, err := driver.List(ctx, from)
+	if err != nil {
+		return err
+	}
+	for _, child := range children {
+		fileInfo, err := driver.Stat(ctx, child)
+		if err != nil {
+			return err
+		}
+		err = f(fileInfo)
+		skipDir := (err == ErrSkipDir)
+		if err != nil && !skipDir {
+			return err
+		}
+
+		if fileInfo.IsDir() && !skipDir {
+			if err := Walk(ctx, driver, child, f); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// pushError formats an error type given a path and an error
+// and pushes it to a slice of errors
+func pushError(errors []error, path string, err error) []error {
+	return append(errors, fmt.Errorf("%s: %s", path, err))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go
new file mode 100644
index 00000000..40b8547c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go
@@ -0,0 +1,121 @@
+package storage
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) {
+	d := inmemory.New()
+	c := []byte("")
+	ctx := context.Background()
+	if err := d.PutContent(ctx, "/a/b/c/d", c); err != nil {
+		t.Fatalf("Unable to put to inmemory fs")
+	}
+	if err := d.PutContent(ctx, "/a/b/c/e", c); err != nil {
+		t.Fatalf("Unable to put to inmemory fs")
+	}
+
+	expected := map[string]string{
+		"/a":       "dir",
+		"/a/b":     "dir",
+		"/a/b/c":   "dir",
+		"/a/b/c/d": "file",
+		"/a/b/c/e": "file",
+	}
+
+	return d, expected, ctx
+}
+
+func TestWalkErrors(t *testing.T) {
+	d, expected, ctx := testFS(t)
+	fileCount := len(expected)
+	err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error {
+		return nil
+	})
+	if err == nil {
+		t.Error("Expected invalid root err")
+	}
+
+	err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
+		// error on the 2nd file
+		if fileInfo.Path() == "/a/b" {
+			return fmt.Errorf("Early termination")
+		}
+		delete(expected, fileInfo.Path())
+		return nil
+	})
+	if len(expected) != fileCount-1 {
+		t.Error("Walk failed to terminate with error")
+	}
+	if err == nil {
+		t.Error("Expected early termination error to propagate")
+	}
+
+	err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error {
+		return nil
+	})
+	if err == nil {
+		t.Errorf("Expected missing file err")
+	}
+
+}
+
+func TestWalk(t *testing.T) {
+	d, expected, ctx := testFS(t)
+	err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
+		filePath := fileInfo.Path()
+		filetype, ok := expected[filePath]
+		if !ok {
+			t.Fatalf("Unexpected file in walk: %q", filePath)
+		}
+
+		if fileInfo.IsDir() {
+			if filetype != "dir" {
+				t.Errorf("Unexpected file type: %q", filePath)
+			}
+		} else {
+			if filetype != "file" {
+				t.Errorf("Unexpected file type: %q", filePath)
+			}
+		}
+		delete(expected, filePath)
+		return nil
+	})
+	if len(expected) > 0 {
+		t.Errorf("Missed files in walk: %q", expected)
+	}
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+}
+
+func TestWalkSkipDir(t *testing.T) {
+	d, expected, ctx := testFS(t)
+	err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo)
error { + filePath := fileInfo.Path() + if filePath == "/a/b" { + // skip processing /a/b/c and /a/b/c/d + return ErrSkipDir + } + delete(expected, filePath) + return nil + }) + if err != nil { + t.Fatalf(err.Error()) + } + if _, ok := expected["/a/b/c"]; !ok { + t.Errorf("/a/b/c not skipped") + } + if _, ok := expected["/a/b/c/d"]; !ok { + t.Errorf("/a/b/c/d not skipped") + } + if _, ok := expected["/a/b/c/e"]; !ok { + t.Errorf("/a/b/c/e not skipped") + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go new file mode 100644 index 00000000..00cd8a6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go @@ -0,0 +1,148 @@ +package testutil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" +) + +// RequestResponseMap is an ordered mapping from Requests to Responses +type RequestResponseMap []RequestResponseMapping + +// RequestResponseMapping defines a Response to be sent in response to a given +// Request +type RequestResponseMapping struct { + Request Request + Response Response +} + +// Request is a simplified http.Request object +type Request struct { + // Method is the http method of the request, for example GET + Method string + + // Route is the http route of this request + Route string + + // QueryParams are the query parameters of this request + QueryParams map[string][]string + + // Body is the byte contents of the http request + Body []byte + + // Headers are the header for this request + Headers http.Header +} + +func (r Request) String() string { + queryString := "" + if len(r.QueryParams) > 0 { + keys := make([]string, 0, len(r.QueryParams)) + queryParts := make([]string, 0, len(r.QueryParams)) + for k := range r.QueryParams { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + for _, val := range r.QueryParams[k] { + queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) + } + } + queryString = "?" 
+ strings.Join(queryParts, "&") + } + var headers []string + if len(r.Headers) > 0 { + var headerKeys []string + for k := range r.Headers { + headerKeys = append(headerKeys, k) + } + sort.Strings(headerKeys) + + for _, k := range headerKeys { + for _, val := range r.Headers[k] { + headers = append(headers, fmt.Sprintf("%s:%s", k, val)) + } + } + + } + return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) +} + +// Response is a simplified http.Response object +type Response struct { + // Statuscode is the http status code of the Response + StatusCode int + + // Headers are the http headers of this Response + Headers http.Header + + // Body is the response body + Body []byte +} + +// testHandler is an http.Handler with a defined mapping from Request to an +// ordered list of Response objects +type testHandler struct { + responseMap map[string][]Response +} + +// NewHandler returns a new test handler that responds to defined requests +// with specified responses +// Each time a Request is received, the next Response is returned in the +// mapping, until no Responses are defined, at which point a 404 is sent back +func NewHandler(requestResponseMap RequestResponseMap) http.Handler { + responseMap := make(map[string][]Response) + for _, mapping := range requestResponseMap { + responses, ok := responseMap[mapping.Request.String()] + if ok { + responseMap[mapping.Request.String()] = append(responses, mapping.Response) + } else { + responseMap[mapping.Request.String()] = []Response{mapping.Response} + } + } + return &testHandler{responseMap: responseMap} +} + +func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + requestBody, _ := ioutil.ReadAll(r.Body) + request := Request{ + Method: r.Method, + Route: r.URL.Path, + QueryParams: r.URL.Query(), + Body: requestBody, + Headers: make(map[string][]string), + } + + // Add headers of interest here + for k, v := range r.Header { + if k == "If-None-Match" { + request.Headers[k] = v + } + } + + responses, ok := app.responseMap[request.String()] + + if !ok || len(responses) == 0 { + http.NotFound(w, r) + return + } + + response := responses[0] + app.responseMap[request.String()] = responses[1:] + + responseHeader := w.Header() + for k, v := range response.Headers { + responseHeader[k] = v + } + + w.WriteHeader(response.StatusCode) + + io.Copy(w, bytes.NewReader(response.Body)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go new file mode 100644 index 00000000..08b796f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go @@ -0,0 +1,95 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "time" + + "github.com/docker/docker/pkg/tarsum" +) + +// CreateRandomTarFile creates a random tarfile, returning it as an +// io.ReadSeeker along with its tarsum. An error is returned if there is a +// problem generating valid content. +func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. 
+ header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := rand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + reader := bytes.NewReader(target.Bytes()) + + // A tar builder that supports tarsum inline calculation would be awesome + // here. + ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1) + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(ioutil.Discard, ts) + if nn != int64(len(target.Bytes())) { + return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes())) + } + + if err != nil { + return nil, "", err + } + + return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 00000000..d433ccaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. 
+ b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. + p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go new file mode 100644 index 00000000..09c3a7bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/print.go b/Godeps/_workspace/src/github.com/docker/distribution/version/print.go new file mode 100644 index 00000000..a82bce39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/print.go @@ -0,0 +1,26 @@ +package version + +import ( + "fmt" + "io" + "os" +) + +// FprintVersion outputs the version string to the writer, in the following +// format, followed by a newline: +// +// 
<cmd> <package> <version>
+//
+// For example, a binary "registry" built from github.com/docker/distribution
+// with version "v2.0" would print the following:
+//
+// registry github.com/docker/distribution v2.0
+//
+func FprintVersion(w io.Writer) {
+	fmt.Fprintln(w, os.Args[0], Package, Version)
+}
+
+// PrintVersion outputs the version information, from Fprint, to stdout.
+func PrintVersion() {
+	FprintVersion(os.Stdout)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go
new file mode 100644
index 00000000..3a542f9b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go
@@ -0,0 +1,11 @@
+package version
+
+// Package is the overall, canonical project import path under which the
+// package was built.
+var Package = "github.com/docker/distribution"
+
+// Version indicates which version of the binary is running. This is set to
+// the latest release tag by hand, always suffixed by "+unknown". During
+// build, it will be replaced by the actual version. The value here will be
+// used if the registry is run after a go get based install.
+var Version = "v2.0.0+unknown"
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh b/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh
new file mode 100644
index 00000000..53e29ce9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# This shell script outputs the current, desired content of version.go, using
+# git describe. For best effect, pipe this to the target file. Generally, this
+# only needs to be updated for releases. The actual value of Version will be
+# replaced during build time if the makefile is used.
+
+set -e
+
+cat <" instruction.
+	addTrustedFlags(cmd, true)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		context  io.ReadCloser
+		isRemote bool
+		err      error
+	)
+
+	_, err = exec.LookPath("git")
+	hasGit := err == nil
+
+	specifiedContext := cmd.Arg(0)
+
+	var (
+		contextDir    string
+		tempDir       string
+		relDockerfile string
+	)
+
+	switch {
+	case specifiedContext == "-":
+		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
+	case urlutil.IsGitURL(specifiedContext) && hasGit:
+		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
+	case urlutil.IsURL(specifiedContext):
+		tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName)
+	default:
+		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
+	}
+
+	if err != nil {
+		return fmt.Errorf("unable to prepare context: %s", err)
+	}
+
+	if tempDir != "" {
+		defer os.RemoveAll(tempDir)
+		contextDir = tempDir
+	}
+
+	// Resolve the FROM lines in the Dockerfile to trusted digest references
+	// using Notary. On a successful build, we must tag the resolved digests
+	// to the original name specified in the Dockerfile.
+	newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
+	if err != nil {
+		return fmt.Errorf("unable to process Dockerfile: %v", err)
+	}
+	defer newDockerfile.Close()
+
+	// And canonicalize dockerfile name to a platform-independent one
+	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
+	if err != nil {
+		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
+	}
+
+	var includes = []string{"."}
+
+	excludes, err := utils.ReadDockerIgnore(path.Join(contextDir, ".dockerignore"))
+	if err != nil {
+		return err
+	}
+
+	if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
+		return fmt.Errorf("Error checking context: '%s'.", err)
+	}
+
+	// If .dockerignore mentions .dockerignore or the Dockerfile
+	// then make sure we send both files over to the daemon
+	// because Dockerfile is, obviously, needed no matter what, and
+	// .dockerignore is needed to know if either one needs to be
+	// removed. The daemon will remove them for us, if needed, after it
+	// parses the Dockerfile. Ignore errors here, as they will have been
+	// caught by ValidateContextDirectory above.
+	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
+	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
+	if keepThem1 || keepThem2 {
+		includes = append(includes, ".dockerignore", relDockerfile)
+	}
+
+	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
+		Compression:     archive.Uncompressed,
+		ExcludePatterns: excludes,
+		IncludeFiles:    includes,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Wrap the tar archive to replace the Dockerfile entry with the rewritten
+	// Dockerfile which uses trusted pulls.
+	context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)
+
+	// Setup an upload progress bar
+	// FIXME: ProgressReader shouldn't be this annoying to use
+	sf := streamformatter.NewStreamFormatter()
+	var body io.Reader = progressreader.New(progressreader.Config{
+		In:        context,
+		Out:       cli.out,
+		Formatter: sf,
+		NewLines:  true,
+		ID:        "",
+		Action:    "Sending build context to Docker daemon",
+	})
+
+	var memory int64
+	if *flMemoryString != "" {
+		parsedMemory, err := units.RAMInBytes(*flMemoryString)
+		if err != nil {
+			return err
+		}
+		memory = parsedMemory
+	}
+
+	var memorySwap int64
+	if *flMemorySwap != "" {
+		if *flMemorySwap == "-1" {
+			memorySwap = -1
+		} else {
+			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
+			if err != nil {
+				return err
+			}
+			memorySwap = parsedMemorySwap
+		}
+	}
+	// Send the build context
+	v := &url.Values{}
+
+	// Check if the given image name can be resolved
+	if *tag != "" {
+		repository, tag := parsers.ParseRepositoryTag(*tag)
+		if err := registry.ValidateRepositoryName(repository); err != nil {
+			return err
+		}
+		if len(tag) > 0 {
+			if err := tags.ValidateTagName(tag); err != nil {
+				return err
+			}
+		}
+	}
+
+	v.Set("t", *tag)
+
+	if *suppressOutput {
+		v.Set("q", "1")
+	}
+	if isRemote {
+		v.Set("remote", cmd.Arg(0))
+	}
+	if *noCache {
+		v.Set("nocache", "1")
+	}
+	if *rm {
+		v.Set("rm", "1")
+	} else {
+		v.Set("rm", "0")
+	}
+
+	if *forceRm {
+		v.Set("forcerm", "1")
+	}
+
+	if *pull {
+		v.Set("pull", "1")
+	}
+
+	v.Set("cpusetcpus", *flCPUSetCpus)
+	v.Set("cpusetmems", *flCPUSetMems)
+	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
+	v.Set("cpuquota", strconv.FormatInt(*flCPUQuota, 10))
+	v.Set("cpuperiod", strconv.FormatInt(*flCPUPeriod, 10))
+	v.Set("memory",
strconv.FormatInt(memory, 10))
+	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
+	v.Set("cgroupparent", *flCgroupParent)
+
+	v.Set("dockerfile", relDockerfile)
+
+	ulimitsVar := flUlimits.GetList()
+	ulimitsJSON, err := json.Marshal(ulimitsVar)
+	if err != nil {
+		return err
+	}
+	v.Set("ulimits", string(ulimitsJSON))
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(cli.configFile.AuthConfigs)
+	if err != nil {
+		return err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+	headers.Set("Content-Type", "application/tar")
+
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          body,
+		out:         cli.out,
+		headers:     headers,
+	}
+
+	serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
+
+	// Windows: show error message about modified file permissions.
+	if runtime.GOOS == "windows" {
+		h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
+		if err == nil {
+			if h.OS != "windows" {
+				fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+			}
+		}
+	}
+
+	if jerr, ok := err.(*jsonmessage.JSONError); ok {
+		// If no error code is set, default to 1
+		if jerr.Code == 0 {
+			jerr.Code = 1
+		}
+		return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
+	}
+
+	if err != nil {
+		return err
+	}
+
+	// Since the build was successful, now we must tag any of the resolved
+	// images from the above Dockerfile rewrite.
+	for _, resolved := range resolvedTags {
+		if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getDockerfileRelPath uses the given context directory for a `docker build`
+// and returns the absolute path to the context directory, the relative path of
+// the dockerfile in that context directory, and a nil error on success.
+func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) {
+	if absContextDir, err = filepath.Abs(givenContextDir); err != nil {
+		return "", "", fmt.Errorf("unable to get absolute context directory: %v", err)
+	}
+
+	// The context dir might be a symbolic link, so follow it to the actual
+	// target directory.
+	absContextDir, err = filepath.EvalSymlinks(absContextDir)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
+	}
+
+	stat, err := os.Lstat(absContextDir)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
+	}
+
+	if !stat.IsDir() {
+		return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
+	}
+
+	absDockerfile := givenDockerfile
+	if absDockerfile == "" {
+		// No -f/--file was specified so use the default relative to the
+		// context directory.
+		absDockerfile = filepath.Join(absContextDir, api.DefaultDockerfileName)
+
+		// Just to be nice ;-) look for 'dockerfile' too but only
+		// use it if we found it, otherwise ignore this check
+		if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
+			altPath := filepath.Join(absContextDir, strings.ToLower(api.DefaultDockerfileName))
+			if _, err = os.Lstat(altPath); err == nil {
+				absDockerfile = altPath
+			}
+		}
+	}
+
+	// If not already an absolute path, the Dockerfile path should be joined to
+	// the base directory.
+	if !filepath.IsAbs(absDockerfile) {
+		absDockerfile = filepath.Join(absContextDir, absDockerfile)
+	}
+
+	// Evaluate symlinks in the path to the Dockerfile too.
+	absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
+	}
+
+	if _, err := os.Lstat(absDockerfile); err != nil {
+		if os.IsNotExist(err) {
+			return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
+		}
+		return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err)
+	}
+
+	if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil {
+		return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err)
+	}
+
+	if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
+		return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir)
+	}
+
+	return absContextDir, relDockerfile, nil
+}
+
+// writeToFile copies from the given reader and writes it to a file with the
+// given filename.
+func writeToFile(r io.Reader, filename string) error {
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
+	if err != nil {
+		return fmt.Errorf("unable to create file: %v", err)
+	}
+	defer file.Close()
+
+	if _, err := io.Copy(file, r); err != nil {
+		return fmt.Errorf("unable to write file: %v", err)
+	}
+
+	return nil
+}
+
+// getContextFromReader will read the contents of the given reader as either a
+// Dockerfile or tar archive to be extracted to a temporary directory used as
+// the context directory. Returns the absolute path to the temporary context
+// directory, the relative path of the dockerfile in that context directory,
+// and a nil error on success.
+func getContextFromReader(r io.Reader, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+	buf := bufio.NewReader(r)
+
+	magic, err := buf.Peek(tarHeaderSize)
+	if err != nil && err != io.EOF {
+		return "", "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
+	}
+
+	if absContextDir, err = ioutil.TempDir("", "docker-build-context-"); err != nil {
+		return "", "", fmt.Errorf("unable to create temporary context directory: %v", err)
+	}
+
+	defer func(d string) {
+		if err != nil {
+			os.RemoveAll(d)
+		}
+	}(absContextDir)
+
+	if !archive.IsArchive(magic) { // Input should be read as a Dockerfile.
+		// -f option has no meaning when we're reading it from stdin,
+		// so just use our default Dockerfile name
+		relDockerfile = api.DefaultDockerfileName
+
+		return absContextDir, relDockerfile, writeToFile(buf, filepath.Join(absContextDir, relDockerfile))
+	}
+
+	if err := archive.Untar(buf, absContextDir, nil); err != nil {
+		return "", "", fmt.Errorf("unable to extract stdin to temporary context directory: %v", err)
+	}
+
+	return getDockerfileRelPath(absContextDir, dockerfileName)
+}
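The stdin handling above hinges on sniffing the peeked header. The following standalone sketch is a simplification: archive.IsArchive also recognizes compression magic, while this hypothetical looksLikeTar only checks the POSIX tar magic ("ustar" at offset 257).

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// looksLikeTar is a simplified stand-in for pkg/archive.IsArchive: a POSIX
// tar stream carries the magic "ustar" at offset 257.
func looksLikeTar(r *bufio.Reader) bool {
	header, err := r.Peek(262) // 257 + len("ustar")
	if err != nil {
		return false // too short to be a tar archive
	}
	return bytes.Equal(header[257:262], []byte("ustar"))
}

func main() {
	in := bufio.NewReader(bytes.NewBufferString("FROM busybox\n"))
	fmt.Println(looksLikeTar(in)) // false -> treat stdin as a Dockerfile
}

+
+// getContextFromGitURL uses a Git URL as context for a `docker build`. The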
The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a nil error on +// success. +func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if absContextDir, err = utils.GitClone(gitURL); err != nil { + return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) + } + + return getDockerfileRelPath(absContextDir, dockerfileName) +} + +// getContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a context tar +// archive and stored in a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a nil error on +// success. +func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + response, err := httputils.Download(remoteURL) + if err != nil { + return "", "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + defer response.Body.Close() + + // Pass the response body through a progress reader. + progReader := &progressreader.Config{ + In: response.Body, + Out: out, + Formatter: streamformatter.NewStreamFormatter(), + Size: int(response.ContentLength), + NewLines: true, + ID: "", + Action: fmt.Sprintf("Downloading build context from remote url: %s", remoteURL), + } + + return getContextFromReader(progReader, dockerfileName) +} + +// getContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a nil +// error on success. +func getContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { + // When using a local context directory, when the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory. + if dockerfileName != "" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + return getDockerfileRelPath(localDir, dockerfileName) +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`) + +type trustedDockerfile struct { + *os.File + size int64 +} + +func (td *trustedDockerfile) Close() error { + td.File.Close() + return os.Remove(td.File.Name()) +} + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + repoInfo *registry.RepositoryInfo + digestRef, tagRef registry.Reference +} + +// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in +// "FROM <image>" instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference.
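+// +// For example (hypothetical digest shown), a Dockerfile line such as +// FROM debian:jessie +// would be rewritten to something like +// FROM debian@sha256:1111111111111111111111111111111111111111111111111111111111111111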
+func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) { + dockerfile, err := os.Open(dockerfileName) + if err != nil { + return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err) + } + defer dockerfile.Close() + + scanner := bufio.NewScanner(dockerfile) + + // Make a tempfile to store the rewritten Dockerfile. + tempFile, err := ioutil.TempFile("", "trusted-dockerfile-") + if err != nil { + return nil, nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err) + } + + trustedFile := &trustedDockerfile{ + File: tempFile, + } + + defer func() { + if err != nil { + // Close the tempfile if there was an error during Notary lookups. + // Otherwise the caller should close it. + trustedFile.Close() + } + }() + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != "scratch" { + // Replace the line with a resolved "FROM repo@digest" + repo, tag := parsers.ParseRepositoryTag(matches[1]) + if tag == "" { + tag = tags.DEFAULTTAG + } + + repoInfo, err := registry.ParseRepositoryInfo(repo) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse repository info: %v", err) + } + + ref := registry.ParseReference(tag) + + if !ref.HasDigest() && isTrusted() { + trustedRef, err := translator(repo, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.ImageName(repo))) + resolvedTags = append(resolvedTags, &resolvedTag{ + repoInfo: repoInfo, + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + n, err := fmt.Fprintln(tempFile, line) + if err != nil { + return nil, nil, err + } + + trustedFile.size += int64(n) + } + + tempFile.Seek(0, os.SEEK_SET) + + return trustedFile, resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, newDockerfile *trustedDockerfile, dockerfileName string) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + var content io.Reader = tarReader + + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. 
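+ // The tar header is reused for the replacement entry, so its Size must + // be updated to the rewritten Dockerfile's length or the entries that + // follow in the stream would be misaligned.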
+ hdr.Size = newDockerfile.size + content = newDockerfile + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go new file mode 100644 index 00000000..119b7784 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go @@ -0,0 +1,171 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/distribution/uuid" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/tlsconfig" +) + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + // initializing closure + init func() error + + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests + basePath string + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client. + in io.ReadCloser + // out holds the output stream (io.Writer) for the client. + out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // tlsConfig holds the TLS configuration for the client, and will + // set the scheme to https in NewDockerCli if present. + tlsConfig *tls.Config + // scheme holds the scheme of the client i.e. https. + scheme string + // inFd holds the file descriptor of the client's STDIN (if valid). + inFd uintptr + // outFd holds file descriptor of the client's STDOUT (if valid). + outFd uintptr + // isTerminalIn indicates whether the client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut indicates whether the client's STDOUT is a TTY + isTerminalOut bool + // transport holds the client transport instance. + transport *http.Transport +} + +// Initialize calls the init function that will set up the configuration for the client +// such as the TLS, tcp and other parameters used to run the client. +func (cli *DockerCli) Initialize() error { + if cli.init == nil { + return nil + } + return cli.init() +} + +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + +// PsFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}.
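+// (Illustrative only: with "psFormat": "{{.ID}}: {{.Names}}" in the client's +// config file, `docker ps` would print one "<id>: <name>" line per container.)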
+func (cli *DockerCli) PsFormat() string { + return cli.configFile.PsFormat +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). +func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { + cli := &DockerCli{ + in: in, + out: out, + err: err, + keyFile: clientFlags.Common.TrustKey, + } + + cli.init = func() error { + + // ignore errors from uuid package when running client commands + uuid.Loggerf = func(string, ...interface{}) {} + + clientFlags.PostParse() + + hosts := clientFlags.Common.Hosts + + switch len(hosts) { + case 0: + defaultHost := os.Getenv("DOCKER_HOST") + if defaultHost == "" { + defaultHost = opts.DefaultHost + } + defaultHost, err := opts.ValidateHost(defaultHost) + if err != nil { + return err + } + hosts = []string{defaultHost} + case 1: + // only accept one host to talk to + default: + return errors.New("Please specify only one -H") + } + + protoAddrParts := strings.SplitN(hosts[0], "://", 2) + cli.proto, cli.addr = protoAddrParts[0], protoAddrParts[1] + + if cli.proto == "tcp" { + // error is checked in pkg/parsers already + parsed, _ := url.Parse("tcp://" + cli.addr) + cli.addr = parsed.Host + cli.basePath = parsed.Path + } + + if clientFlags.Common.TLSOptions != nil { + cli.scheme = "https" + var e error + cli.tlsConfig, e = tlsconfig.Client(*clientFlags.Common.TLSOptions) + if e != nil { + return e + } + } else { + cli.scheme = "http" + } + + if cli.in != nil { + cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) + } + if cli.out != nil { + cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) + } + + // The transport is created here for reuse during the client session. + cli.transport = &http.Transport{ + TLSClientConfig: cli.tlsConfig, + } + sockets.ConfigureTCPTransport(cli.transport, cli.proto, cli.addr) + + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) + } + cli.configFile = configFile + + return nil + } + + return cli +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go new file mode 100644 index 00000000..4cfce5f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go @@ -0,0 +1,5 @@ +// Package client provides a command-line interface for Docker. +// +// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. +// See https://docs.docker.com/installation/ for instructions on installing Docker. 
+package client diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go new file mode 100644 index 00000000..fe4acd48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go @@ -0,0 +1,84 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" +) + +// CmdCommit creates a new image from a container's changes. +// +// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, "Create a new image from a container's changes", true) + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")") + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + cmd.Require(flag.Max, 2) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + name = cmd.Arg(0) + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + ) + + // Check if the given image name can be resolved + if repository != "" { + if err := registry.ValidateRepositoryName(repository); err != nil { + return err + } + } + + v := url.Values{} + v.Set("container", name) + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("comment", *flComment) + v.Set("author", *flAuthor) + for _, change := range flChanges.GetAll() { + v.Add("changes", change) + } + + if *flPause != true { + v.Set("pause", "0") + } + + var ( + config *runconfig.Config + response types.ContainerCommitResponse + ) + + if *flConfig != "" { + config = &runconfig.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + serverResp, err := cli.call("POST", "/commit?"+v.Encode(), config, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + return err + } + + fmt.Fprintln(cli.out, response.ID) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go new file mode 100644 index 00000000..99278adf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go @@ -0,0 +1,305 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +// 
CmdCp copies files/folders to or from a path in a container. +// +// When copying from a container, if LOCALPATH is '-' the data is written as a +// tar archive file to STDOUT. +// +// When copying to a container, if LOCALPATH is '-' the data is read as a tar +// archive file from STDIN, and the destination CONTAINER:PATH must specify +// a directory. +// +// Usage: +// docker cp CONTAINER:PATH LOCALPATH|- +// docker cp LOCALPATH|- CONTAINER:PATH +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := Cli.Subcmd( + "cp", + []string{"CONTAINER:PATH LOCALPATH|-", "LOCALPATH|- CONTAINER:PATH"}, + strings.Join([]string{ + "Copy files/folders between a container and your host.\n", + "Use '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + true, + ) + + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("source cannot be empty") + } + if cmd.Arg(1) == "" { + return fmt.Errorf("destination cannot be empty") + } + + srcContainer, srcPath := splitCpArg(cmd.Arg(0)) + dstContainer, dstPath := splitCpArg(cmd.Arg(1)) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + switch direction { + case fromContainer: + return cli.copyFromContainer(srcContainer, srcPath, dstPath) + case toContainer: + return cli.copyToContainer(srcPath, dstContainer, dstPath) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if the container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately preceded by a backslash. +func splitCpArg(arg string) (container, path string) { + if filepath.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. + return "", arg + } + + return parts[0], parts[1] +} + +func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
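+ // The stat is implemented as a HEAD request against the archive endpoint: + // no body is transferred, and the stat data comes back base64-encoded in + // the X-Docker-Container-Path-Stat response header (decoded below).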
+ + urlStr := fmt.Sprintf("/containers/%s/archive?%s", containerName, query.Encode()) + + response, err := cli.call("HEAD", urlStr, nil, nil) + if err != nil { + return stat, err + } + defer response.body.Close() + + if response.statusCode != http.StatusOK { + return stat, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return getContainerPathStatFromHeader(response.header) +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + urlStr := fmt.Sprintf("/containers/%s/archive?%s", srcContainer, query.Encode()) + + response, err := cli.call("GET", urlStr, nil, nil) + if err != nil { + return err + } + defer response.body.Close() + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, response.body) + + return err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return fmt.Errorf("unable to get resource stat from response: %s", err) + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + } + + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(response.body, srcInfo, dstPath) +} + +func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. 
+ dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := cli.statContainerPath(dstContainer, dstPath) + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var content io.Reader + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + srcArchive, err := archive.TarResource(srcPath) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // Prepare source copy info. + srcInfo, err := archive.CopyInfoStatPath(srcPath, true) + if err != nil { + return err + } + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + dstPath = dstDir + content = preparedArchive + } + + query := make(url.Values, 2) + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ query.Set("noOverwriteDirNonDir", "true") + + urlStr := fmt.Sprintf("/containers/%s/archive?%s", dstContainer, query.Encode()) + + response, err := cli.stream("PUT", urlStr, &streamOpts{in: content}) + if err != nil { + return err + } + defer response.body.Close() + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go new file mode 100644 index 00000000..76e935eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go @@ -0,0 +1,185 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" +) + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + v := url.Values{} + repos, tag := parsers.ParseRepositoryTag(image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = tags.DEFAULTTAG + } + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(repos) + if err != nil { + return err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + sopts := &streamOpts{ + rawTerminal: true, + out: out, + headers: map[string][]string{"X-Registry-Auth": registryAuthHeader}, + } + if _, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts); err != nil { + return err + } + return nil +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { + containerValues := url.Values{} + if name != "" { + containerValues.Set("name", name) + } + + mergedConfig := runconfig.MergeConfigs(config, hostConfig) + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + repo, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = tags.DEFAULTTAG + } + + ref := registry.ParseReference(tag) + var trustedRef registry.Reference + + if isTrusted() && !ref.HasDigest() { + var err error + trustedRef, err = cli.trustedReference(repo, ref) + if err != nil { + return nil, err + } + config.Image = trustedRef.ImageName(repo) + } + + //create the 
container + serverResp, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil) + //if image not found try to pull it + if serverResp.statusCode == 404 && strings.Contains(err.Error(), config.Image) { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.ImageName(repo)) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + if trustedRef != nil && !ref.HasDigest() { + repoInfo, err := registry.ParseRepositoryInfo(repo) + if err != nil { + return nil, err + } + if err := cli.tagTrusted(repoInfo, trustedRef, ref); err != nil { + return nil, err + } + } + // Retry + if serverResp, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + defer serverResp.body.Close() + + var response types.ContainerCreateResponse + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + return nil, err + } + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} + +// CmdCreate creates a new container from a given image. +// +// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] +func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, "Create a new container", true) + addTrustedFlags(cmd, true) + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + if config.Image == "" { + cmd.Usage() + return nil + } + response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", response.ID) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go new file mode 100644 index 00000000..b955774c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdDiff shows changes on a container's filesystem. +// +// Each changed file is printed on a separate line, prefixed with a single +// character that indicates the status of the file: C (modified), A (added), +// or D (deleted). 
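+// +// For example (hypothetical container state): +// +// C /etc +// A /etc/hostname.bak +// D /tmp/scratch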
+// +// Usage: docker diff CONTAINER +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, "Inspect changes on a container's filesystem", true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + changes := []types.ContainerChange{} + if err := json.NewDecoder(serverResp.body).Decode(&changes); err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go new file mode 100644 index 00000000..c0168bdb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go @@ -0,0 +1,63 @@ +package client + +import ( + "net/url" + "time" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/timeutils" +) + +// CmdEvents prints a live stream of real time events from the server. +// +// Usage: docker events [OPTIONS] +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := Cli.Subcmd("events", nil, "Get real time events from the server", true) + since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + var ( + v = url.Values{} + eventFilterArgs = filters.Args{} + ) + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } + ref := time.Now() + if *since != "" { + v.Set("since", timeutils.GetTimestamp(*since, ref)) + } + if *until != "" { + v.Set("until", timeutils.GetTimestamp(*until, ref)) + } + if len(eventFilterArgs) > 0 { + filterJSON, err := filters.ToParam(eventFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJSON) + } + sopts := &streamOpts{ + rawTerminal: true, + out: cli.out, + } + if _, err := cli.stream("GET", "/events?"+v.Encode(), sopts); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go new file mode 100644 index 00000000..d02c019b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go @@ -0,0 +1,134 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/runconfig" +) + +// CmdExec runs a command in a running container.
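+// +// For example (hypothetical container name), `docker exec -it web /bin/sh` +// opens an interactive shell inside the running container "web".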
+// +// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] +func (cli *DockerCli) CmdExec(args ...string) error { + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, "Run a command in a running container", true) + + execConfig, err := runconfig.ParseExec(cmd, args) + // just in case the ParseExec does not exit + if execConfig.Container == "" || err != nil { + return Cli.StatusError{StatusCode: 1} + } + + serverResp, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var response types.ContainerExecCreateResponse + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + return err + } + + execID := response.ID + + if execID == "" { + fmt.Fprintf(cli.out, "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + execStartCheck := &types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execStartCheck, nil)); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) + return nil + } + + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + hijacked = make(chan io.Closer) + errCh chan error + ) + + // Block the return until the chan gets closed + defer func() { + logrus.Debugf("End of CmdExec(), Waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)") + } + }() + + if execConfig.AttachStdin { + in = cli.in + } + if execConfig.AttachStdout { + out = cli.out + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + errCh = promise.Go(func() error { + return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig) + }) + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines). + if closer != nil { + defer closer.Close() + } + case err := <-errCh: + if err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + if execConfig.Tty && cli.isTerminalIn { + if err := cli.monitorTtySize(execID, true); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go new file mode 100644 index 00000000..4d35d54b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go @@ -0,0 +1,46 @@ +package client + +import ( + "errors" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdExport exports a filesystem as a tar archive. +// +// The tar archive is streamed to STDOUT by default or written to a file.
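+// +// For example (hypothetical names), `docker export -o web.tar web` writes +// the container's filesystem to web.tar instead of STDOUT.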
+// +// Usage: docker export [OPTIONS] CONTAINER +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := Cli.Subcmd("export", []string{"CONTAINER"}, "Export the contents of a container's filesystem as a tar archive", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + var ( + output = cli.out + err error + ) + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } else if cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + image := cmd.Arg(0) + sopts := &streamOpts{ + rawTerminal: true, + out: output, + } + if _, err := cli.stream("GET", "/containers/"+image+"/export", sopts); err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go new file mode 100644 index 00000000..5853d79b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go @@ -0,0 +1,257 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "os" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if cwc, ok := c.rawConn.(interface { + CloseWrite() error + }); ok { + return cwc.CloseWrite() + } + return nil +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} + +// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in +// order to return our custom tlsClientCon struct which holds both the tls.Conn +// object _and_ its underlying raw connection. The rationale for this is that +// we need to be able to close the write end of the connection when attaching, +// which tls.Conn does not provide. +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + rawConn, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state.
Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := rawConn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + c := *config + c.ServerName = hostname + config = &c + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is where Docker differs from the standard crypto/tls package: we + // return a wrapper which holds both the TLS and raw connections. + return &tlsClientCon{conn, rawConn}, nil +} + +func (cli *DockerCli) dial() (net.Conn, error) { + if cli.tlsConfig != nil && cli.proto != "unix" { + // Notice this isn't Go standard's tls.Dial function + return tlsDial(cli.proto, cli.addr, cli.tlsConfig) + } + return net.Dial(cli.proto, cli.addr) +} + +func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error { + defer func() { + if started != nil { + close(started) + } + }() + + params, err := cli.encodeData(data) + if err != nil { + return err + } + req, err := http.NewRequest(method, fmt.Sprintf("%s/v%s%s", cli.basePath, api.Version, path), params) + if err != nil { + return err + } + + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.configFile.HTTPHeaders { + req.Header.Set(k, v) + } + + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")") + req.Header.Set("Content-Type", "text/plain") + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + req.Host = cli.addr + + dial, err := cli.dial() + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := dial.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon.
Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + clientconn.Do(req) + + rwc, br := clientconn.Hijack() + defer rwc.Close() + + if started != nil { + started <- rwc + } + + var receiveStdout chan error + + var oldState *term.State + + if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { + oldState, err = term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + defer term.RestoreTerminal(cli.inFd, oldState) + } + + if stdout != nil || stderr != nil { + receiveStdout = promise.Go(func() (err error) { + defer func() { + if in != nil { + if setRawTerminal && cli.isTerminalIn { + term.RestoreTerminal(cli.inFd, oldState) + } + // For some reason this Close call blocks on darwin.. + // As the client exits right after, simply discard the close + // until we find a better solution. + if runtime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal && stdout != nil { + _, err = io.Copy(stdout, br) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, br) + } + logrus.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := promise.Go(func() error { + if in != nil { + io.Copy(rwc, in) + logrus.Debugf("[hijack] End of stdin") + } + + if conn, ok := rwc.(interface { + CloseWrite() error + }); ok { + if err := conn.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminalIn { + if err := <-sendStdin; err != nil { + logrus.Debugf("Error sendStdin: %s", err) + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go new file mode 100644 index 00000000..925add66 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go @@ -0,0 +1,77 @@ +package client + +import ( + "encoding/json" + "fmt" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/units" +) + +// CmdHistory shows the history of an image.
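+// +// For example (hypothetical image name), `docker history --no-trunc debian` +// prints one row per layer with full IDs and creation commands.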
+// +// Usage: docker history [OPTIONS] IMAGE +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := Cli.Subcmd("history", []string{"IMAGE"}, "Show the history of an image", true) + human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + history := []types.ImageHistory{} + if err := json.NewDecoder(serverResp.body).Decode(&history); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + } + + for _, entry := range history { + if *noTrunc { + fmt.Fprintf(w, entry.ID) + } else { + fmt.Fprintf(w, stringid.TruncateID(entry.ID)) + } + if !*quiet { + if *human { + fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0)))) + } else { + fmt.Fprintf(w, "\t%s\t", time.Unix(entry.Created, 0).Format(time.RFC3339)) + } + + if *noTrunc { + fmt.Fprintf(w, "%s\t", entry.CreatedBy) + } else { + fmt.Fprintf(w, "%s\t", stringutils.Truncate(entry.CreatedBy, 45)) + } + + if *human { + fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size))) + } else { + fmt.Fprintf(w, "%d\t", entry.Size) + } + + fmt.Fprintf(w, "%s", entry.Comment) + } + fmt.Fprintf(w, "\n") + } + w.Flush() + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go new file mode 100644 index 00000000..92adeed0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go @@ -0,0 +1,130 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/utils" +) + +// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. +// +// Usage: docker images [OPTIONS] [REPOSITORY] +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := Cli.Subcmd("images", []string{"[REPOSITORY]"}, "List images", true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server.
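+ // (Illustrative: a flag such as --filter dangling=true parses into + // filters.Args as {"dangling": ["true"]} and is marshaled to JSON below.)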
+ imageFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + matchName := cmd.Arg(0) + v := url.Values{} + if len(imageFilterArgs) > 0 { + filterJSON, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJSON) + } + + if cmd.NArg() == 1 { + // FIXME rename this parameter, to not be confused with the filters flag + v.Set("filter", matchName) + } + if *all { + v.Set("all", "1") + } + + serverResp, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + images := []types.Image{} + if err := json.NewDecoder(serverResp.body).Decode(&images); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + if *showDigests { + fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } else { + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } + } + + for _, image := range images { + ID := image.ID + if !*noTrunc { + ID = stringid.TruncateID(ID) + } + + repoTags := image.RepoTags + repoDigests := image.RepoDigests + + if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" { + // dangling image - clear out either repoTags or repoDigests so we only show it once below + repoDigests = []string{} + } + + // combine the tags and digests lists + tagsAndDigests := append(repoTags, repoDigests...) + for _, repoAndRef := range tagsAndDigests { + repo, ref := parsers.ParseRepositoryTag(repoAndRef) + // default tag and digest to none - if there's a value, it'll be set below + tag := "<none>" + digest := "<none>" + if utils.DigestReference(ref) { + digest = ref + } else { + tag = ref + } + + if !*quiet { + if *showDigests { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) + } else { + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) + } + } else { + fmt.Fprintln(w, ID) + } + } + } + + if !*quiet { + w.Flush() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go new file mode 100644 index 00000000..ec3d028f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go @@ -0,0 +1,77 @@ +package client + +import ( + "fmt" + "io" + "net/url" + "os" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/registry" +) + +// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. +// +// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to a local file relative to the docker client. If the URL is '-', then the tar file is read from STDIN.
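+// +// For example (hypothetical names), `cat rootfs.tar | docker import - myrepo/myimage` +// creates the image myrepo/myimage from a tarball read from STDIN.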
+// +// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true) + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + v = url.Values{} + src = cmd.Arg(0) + repository = cmd.Arg(1) + ) + + v.Set("fromSrc", src) + v.Set("repo", repository) + for _, change := range flChanges.GetAll() { + v.Add("changes", change) + } + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. Please use file|URL|- [REPOSITORY[:TAG]]\n") + v.Set("tag", cmd.Arg(2)) + } + + if repository != "" { + //Check if the given image name can be resolved + repo, _ := parsers.ParseRepositoryTag(repository) + if err := registry.ValidateRepositoryName(repo); err != nil { + return err + } + } + + var in io.Reader + + if src == "-" { + in = cli.in + } else if !urlutil.IsURL(src) { + v.Set("fromSrc", "-") + file, err := os.Open(src) + if err != nil { + return err + } + defer file.Close() + in = file + + } + + sopts := &streamOpts{ + rawTerminal: true, + in: in, + out: cli.out, + } + + _, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go new file mode 100644 index 00000000..c7b19ccb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go @@ -0,0 +1,108 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/units" +) + +// CmdInfo displays system-wide information. 
+// +// Usage: docker info +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := Cli.Subcmd("info", nil, "Display system-wide information", true) + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/info", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + info := &types.Info{} + if err := json.NewDecoder(serverResp.body).Decode(info); err != nil { + return fmt.Errorf("Error reading remote info: %v", err) + } + + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) + + if info.Debug { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) + fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", info.NEventsListener) + fmt.Fprintf(cli.out, "Init SHA1: %s\n", info.InitSha1) + fmt.Fprintf(cli.out, "Init Path: %s\n", info.InitPath) + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) + } + + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HttpProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HttpsProxy) + ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) + } + } + // Only output these warnings if the server supports these features + if h, err := httputils.ParseServerHeader(serverResp.header.Get("Server")); err == nil { + if h.OS != "windows" { + if !info.MemoryLimit { + fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") + } + if !info.SwapLimit { + fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") + } + if !info.IPv4Forwarding { + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + } + if !info.BridgeNfIptables { + fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-iptables is disabled\n") + } + if !info.BridgeNfIp6tables { + fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled\n") + } + } + } + + if info.Labels != nil { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + + if info.ExperimentalBuild { + fmt.Fprintf(cli.out, "Experimental: true\n") + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go new file mode 100644 index 00000000..6e728bdf --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go @@ -0,0 +1,157 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strings" + "text/template" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +var funcMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +// CmdInspect displays low-level information on one or more containers or images. +// +// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, "Return low-level information on a container or image", true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var tmpl *template.Template + var err error + var obj []byte + + if *tmplStr != "" { + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + } + + if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { + return fmt.Errorf("%q is not a valid value for --type", *inspectType) + } + + indented := new(bytes.Buffer) + indented.WriteString("[\n") + status := 0 + isImage := false + + for _, name := range cmd.Args() { + + if *inspectType == "" || *inspectType == "container" { + obj, _, err = readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil)) + if err != nil && *inspectType == "container" { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + } + + if obj == nil && (*inspectType == "" || *inspectType == "image") { + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil)) + isImage = true + if err != nil { + if strings.Contains(err.Error(), "No such") { + if *inspectType == "" { + fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "Error: No such image: %s\n", name) + } + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + + } + + if tmpl == nil { + if err := json.Indent(indented, obj, "", " "); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } else { + rdr := bytes.NewReader(obj) + dec := json.NewDecoder(rdr) + + if isImage { + inspPtr := types.ImageInspect{} + if err := dec.Decode(&inspPtr); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, inspPtr); err != nil { + rdr.Seek(0, 0) + var raw interface{} + if err := dec.Decode(&raw); err != nil { + return err + } + if err = tmpl.Execute(cli.out, raw); err != nil { + return err + } + } + } else { + inspPtr := types.ContainerJSON{} + if err := dec.Decode(&inspPtr); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, inspPtr); err != nil { + rdr.Seek(0, 0) + var raw interface{} + if err := dec.Decode(&raw); err != nil { + return err + } + if err = tmpl.Execute(cli.out, raw); err != nil { + return err + } + } + } + cli.out.Write([]byte{'\n'}) + } 
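+ // separate the inspected objects with commas; the trailing comma is trimmed below before the closing bracket is written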
+ indented.WriteString(",") + } + + if indented.Len() > 1 { + // Remove trailing ',' + indented.Truncate(indented.Len() - 1) + } + indented.WriteString("]\n") + + if tmpl == nil { + // Note that we will always write "[]" when "-f" isn't specified, + // to make sure the output would always be array, see + // https://github.com/docker/docker/pull/9500#issuecomment-65846734 + if _, err := io.Copy(cli.out, indented); err != nil { + return err + } + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go new file mode 100644 index 00000000..63abed31 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go @@ -0,0 +1,33 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdKill kills one or more running container using SIGKILL or a specified signal. +// +// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdKill(args ...string) error { + cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, "Kill a running container using SIGKILL or a specified signal", true) + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to kill containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go new file mode 100644 index 00000000..9501db4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go @@ -0,0 +1,42 @@ +package client + +import ( + "io" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdLoad loads an image from a tar archive. +// +// The tar archive is read from STDIN by default, or from a tar archive file. 
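+// For example, "docker load -i images.tar" (file name illustrative) restores the images and tags that a previous "docker save" wrote to that archive.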
+// +// Usage: docker load [OPTIONS] +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := Cli.Subcmd("load", nil, "Load an image from a tar archive or STDIN", true) + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + var ( + input io.Reader = cli.in + err error + ) + if *infile != "" { + input, err = os.Open(*infile) + if err != nil { + return err + } + } + sopts := &streamOpts{ + rawTerminal: true, + in: input, + out: cli.out, + } + if _, err := cli.stream("POST", "/images/load", sopts); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go new file mode 100644 index 00000000..68ec5c6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go @@ -0,0 +1,147 @@ +package client + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" +) + +// CmdLogin logs in or registers a user to a Docker registry service. +// +// If no server is specified, the user will be logged into or registered to the registry's index server. +// +// Usage: docker login SERVER +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServer+"\" is the default.", true) + cmd.Require(flag.Max, 1) + + var username, password, email string + + cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + + cmd.ParseFlags(args, true) + + serverAddress := registry.IndexServer + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + promptDefault := func(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } + } + + readInput := func(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) + } + + authconfig, ok := cli.configFile.AuthConfigs[serverAddress] + if !ok { + authconfig = cliconfig.AuthConfig{} + } + + if username == "" { + promptDefault("Username", authconfig.Username) + username = readInput(cli.in, cli.out) + username = strings.Trim(username, " ") + if username == "" { + username = authconfig.Username + } + } + // Assume that a different username means they may not want to use + // the password or email from the config file, so prompt them + if username != authconfig.Username { + if password == "" { + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + password = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if password == "" { + return fmt.Errorf("Error : Password Required") + } + } + + if email == "" { + promptDefault("Email", authconfig.Email) + email = 
readInput(cli.in, cli.out) + if email == "" { + email = authconfig.Email + } + } + } else { + // However, if they don't override the username use the + // password or email from the cmd line if specified. IOW, allow + // them to change/override them. And if not specified, just + // use what's in the config file + if password == "" { + password = authconfig.Password + } + if email == "" { + email = authconfig.Email + } + } + authconfig.Username = username + authconfig.Password = password + authconfig.Email = email + authconfig.ServerAddress = serverAddress + cli.configFile.AuthConfigs[serverAddress] = authconfig + + serverResp, err := cli.call("POST", "/auth", cli.configFile.AuthConfigs[serverAddress], nil) + if serverResp.statusCode == 401 { + delete(cli.configFile.AuthConfigs, serverAddress) + if err2 := cli.configFile.Save(); err2 != nil { + fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2) + } + return err + } + if err != nil { + return err + } + + defer serverResp.body.Close() + + var response types.AuthResponse + if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { + // Upon error, remove entry + delete(cli.configFile.AuthConfigs, serverAddress) + return err + } + + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Error saving config file: %v", err) + } + fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename()) + + if response.Status != "" { + fmt.Fprintf(cli.out, "%s\n", response.Status) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go new file mode 100644 index 00000000..e81299b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go @@ -0,0 +1,38 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/registry" +) + +// CmdLogout logs a user out from a Docker registry. +// +// If no server is specified, the user will be logged out from the registry's index server.
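+// For example, "docker logout localhost:5000" (a hypothetical private registry) removes that server's entry from the config file.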
+// +// Usage: docker logout [SERVER] +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServer+"\" is the default.", true) + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + serverAddress := registry.IndexServer + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + } else { + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.AuthConfigs, serverAddress) + + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go new file mode 100644 index 00000000..f1d647f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go @@ -0,0 +1,69 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/timeutils" +) + +// CmdLogs fetches the logs of a given container. +// +// docker logs [OPTIONS] CONTAINER +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, "Fetch the logs of a container", true) + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") + times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + + serverResp, err := cli.call("GET", "/containers/"+name+"/json", nil, nil) + if err != nil { + return err + } + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + if logType := c.HostConfig.LogConfig.Type; logType != "json-file" { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType) + } + + v := url.Values{} + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *since != "" { + v.Set("since", timeutils.GetTimestamp(*since, time.Now())) + } + + if *times { + v.Set("timestamps", "1") + } + + if *follow { + v.Set("follow", "1") + } + v.Set("tail", *tail) + + sopts := &streamOpts{ + rawTerminal: c.Config.Tty, + out: cli.out, + err: cli.err, + } + + _, err = cli.stream("GET", "/containers/"+name+"/logs?"+v.Encode(), sopts) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go new file mode 100644 index 00000000..a1de2699 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go @@ -0,0 +1,16 @@ +// +build experimental + +package client + +import ( + "os" + + nwclient "github.com/docker/libnetwork/client" +) + +// CmdNetwork is used to create, display and configure network endpoints. 
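+// All subcommands are delegated to the libnetwork client, e.g. "docker network create" (experimental builds only, per the build tag above).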
+func (cli *DockerCli) CmdNetwork(args ...string) error { + nCli := nwclient.NewNetworkCli(cli.out, cli.err, nwclient.CallFunc(cli.callWrapper)) + args = append([]string{"network"}, args...) + return nCli.Cmd(os.Args[0], args...) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go new file mode 100644 index 00000000..94dd59d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdPause pauses all processes within one or more containers. +// +// Usage: docker pause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, "Pause all processes within a container", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to pause containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go new file mode 100644 index 00000000..d8bcbf6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go @@ -0,0 +1,72 @@ +package client + +import ( + "encoding/json" + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/nat" +) + +// CmdPort lists port mappings for a container. +// If a private port is specified, it also shows the public-facing port that is NATed to the private port. 
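+// For example, "docker port web 80/tcp" (container name hypothetical) prints the mapped host address, such as "0.0.0.0:32768".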
+// +// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var c struct { + NetworkSettings struct { + Ports nat.PortMap + } + } + + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go new file mode 100644 index 00000000..e7fb97c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go @@ -0,0 +1,116 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/client/ps" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers/filters" +) + +// CmdPs outputs a list of Docker containers. 
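+// For example, "docker ps -a -q" prints only the IDs of all containers, running or stopped.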
+// +// Usage: docker ps [OPTIONS] +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.Args{} + v = url.Values{} + + cmd = Cli.Subcmd("ps", nil, "List containers", true) + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running") + since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running") + before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running") + format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") + flFilter = opts.NewListOpts(nil) + ) + cmd.Require(flag.Exact, 0) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.ParseFlags(args, true) + if *last == -1 && *nLatest { + *last = 1 + } + + if *all { + v.Set("all", "1") + } + + if *last != -1 { + v.Set("limit", strconv.Itoa(*last)) + } + + if *since != "" { + v.Set("since", *since) + } + + if *before != "" { + v.Set("before", *before) + } + + if *size { + v.Set("size", "1") + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + if len(psFilterArgs) > 0 { + filterJSON, err := filters.ToParam(psFilterArgs) + if err != nil { + return err + } + + v.Set("filters", filterJSON) + } + + serverResp, err := cli.call("GET", "/containers/json?"+v.Encode(), nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + containers := []types.Container{} + if err := json.NewDecoder(serverResp.body).Decode(&containers); err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.PsFormat()) > 0 { + f = cli.PsFormat() + } else { + f = "table" + } + } + + psCtx := ps.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Size: *size, + Trunc: !*noTrunc, + } + + ps.Format(psCtx, containers) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom.go new file mode 100644 index 00000000..d9e8fe07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom.go @@ -0,0 +1,217 @@ +package ps + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/units" +) + +const ( + tableKey = "table" + + idHeader = "CONTAINER ID" + imageHeader = "IMAGE" + namesHeader = "NAMES" + commandHeader = "COMMAND" + createdAtHeader = "CREATED AT" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + sizeHeader = "SIZE" + labelsHeader = "LABELS" +) + +type containerContext struct { + trunc bool + header 
[]string + c types.Container +} + +func (c *containerContext) ID() string { + c.addHeader(idHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.addHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.addHeader(imageHeader) + if c.c.Image == "" { + return "" + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.addHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Truncate(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.addHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.addHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.addHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.addHeader(sizeHeader) + srw := units.HumanSize(float64(c.c.SizeRw)) + sv := units.HumanSize(float64(c.c.SizeRootFs)) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.addHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.addHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) fullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +func (c *containerContext) addHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func customFormat(ctx Context, containers []types.Container) { + var ( + table bool + header string + format = ctx.Format + buffer = bytes.NewBufferString("") + ) + + if strings.HasPrefix(ctx.Format, tableKey) { + table = true + format = format[len(tableKey):] + } + + format = strings.Trim(format, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + format = r.Replace(format) + + if table && ctx.Size { + format += "\t{{.Size}}" + } + + tmpl, err := template.New("ps template").Parse(format) + if err != nil { + buffer.WriteString(fmt.Sprintf("Invalid `docker ps` format: %v\n", err)) + } + + for _, container := range containers { + containerCtx := &containerContext{ + trunc: ctx.Trunc, + c: container, + } + if err := tmpl.Execute(buffer, containerCtx); err != nil { + buffer = bytes.NewBufferString(fmt.Sprintf("Invalid `docker ps` format: %v\n", err)) + break + } + if table && len(header) == 0 { + header = containerCtx.fullHeader() + } + buffer.WriteString("\n") + } + + if table { + if len(header) == 0 { + // if we still don't have a header, we didn't have any containers so 
we need to fake it to get the right headers from the template + containerCtx := &containerContext{} + tmpl.Execute(bytes.NewBufferString(""), containerCtx) + header = containerCtx.fullHeader() + } + + t := tabwriter.NewWriter(ctx.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(header)) + t.Write([]byte("\n")) + buffer.WriteTo(t) + t.Flush() + } else { + buffer.WriteTo(ctx.Output) + } +} + +func stripNamePrefix(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom_test.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom_test.go new file mode 100644 index 00000000..e6575375 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/custom_test.go @@ -0,0 +1,88 @@ +package ps + +import ( + "reflect" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +func TestContainerContextID(t *testing.T) { + containerID := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + expHeader string + call func() string + }{ + {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), idHeader, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, + {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, + {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, + {types.Container{Created: int(unix)}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, + {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + // comma-separated values means probably a map input, which won't + // be guaranteed to have the same order as our expected value + // We'll create maps and use reflect.DeepEquals to check instead: + entriesMap := make(map[string]string) + expMap := make(map[string]string) + entries := strings.Split(v, ",") + expectedEntries := strings.Split(c.expValue, ",") + for _, entry := range entries { + keyval := strings.Split(entry, "=") + entriesMap[keyval[0]] = keyval[1] + } + for _, expected := range expectedEntries { + keyval := strings.Split(expected, "=") + expMap[keyval[0]] = keyval[1] + } + if !reflect.DeepEqual(expMap, entriesMap) { + t.Fatalf("Expected entries: %v, got: %v", c.expValue, v) + } + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.fullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } + + c := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c, trunc: true} + + sid := 
ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + t.Fatalf("Expected ubuntu, was %s\n", node) + } + + h := ctx.fullHeader() + if h != "SWARM ID\tNODE NAME" { + t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) + + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/formatter.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/formatter.go new file mode 100644 index 00000000..1a1323ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps/formatter.go @@ -0,0 +1,73 @@ +package ps + +import ( + "io" + + "github.com/docker/docker/api/types" +) + +const ( + tableFormatKey = "table" + rawFormatKey = "raw" + + defaultTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + defaultQuietFormat = "{{.ID}}" +) + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format string + // Size when set to true will display the size of the output. + Size bool + // Quiet when set to true will simply print minimal information. + Quiet bool + // Trunc when set to true will truncate the output of certain fields such as Container ID. + Trunc bool +} + +// Format helps to format the output using the parameters set in the Context. +// Currently Format allow to display in raw, table or custom format the output. +func Format(ctx Context, containers []types.Container) { + switch ctx.Format { + case tableFormatKey: + tableFormat(ctx, containers) + case rawFormatKey: + rawFormat(ctx, containers) + default: + customFormat(ctx, containers) + } +} + +func rawFormat(ctx Context, containers []types.Container) { + if ctx.Quiet { + ctx.Format = `container_id: {{.ID}}` + } else { + ctx.Format = `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{.Status}} +names: {{.Names}} +labels: {{.Labels}} +ports: {{.Ports}} +` + if ctx.Size { + ctx.Format += `size: {{.Size}} +` + } + } + + customFormat(ctx, containers) +} + +func tableFormat(ctx Context, containers []types.Container) { + ctx.Format = defaultTableFormat + if ctx.Quiet { + ctx.Format = defaultQuietFormat + } + + customFormat(ctx, containers) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go new file mode 100644 index 00000000..d6b85543 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" + "net/url" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/graph/tags" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" +) + +// CmdPull pulls an image or a repository from the registry. 
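+// For example, "docker pull busybox" resolves to busybox:latest; the default tag is filled in below when none is given.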
+// +// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, "Pull an image or a repository from a registry", true) + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + addTrustedFlags(cmd, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + remote := cmd.Arg(0) + + taglessRemote, tag := parsers.ParseRepositoryTag(remote) + if tag == "" && !*allTags { + tag = tags.DEFAULTTAG + fmt.Fprintf(cli.out, "Using default tag: %s\n", tag) + } else if tag != "" && *allTags { + return fmt.Errorf("tag can't be used with --all-tags/-a") + } + + ref := registry.ParseReference(tag) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + if err != nil { + return err + } + + if isTrusted() && !ref.HasDigest() { + // Check if tag is digest + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + return cli.trustedPull(repoInfo, ref, authConfig) + } + + v := url.Values{} + v.Set("fromImage", ref.ImageName(taglessRemote)) + + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull") + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go new file mode 100644 index 00000000..5d01511c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" + "net/url" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" +) + +// CmdPush pushes an image or repository to the registry. +// +// Usage: docker push NAME[:TAG] +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, "Push an image or a repository to a registry", true) + addTrustedFlags(cmd, false) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + remote, tag := parsers.ParseRepositoryTag(cmd.Arg(0)) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(remote) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + // If we're not using a custom registry, we know the restrictions + // applied to repository names and can warn the user in advance. + // Custom repositories can have different rules, and we must also + // allow pushing by image ID. + if repoInfo.Official { + username := authConfig.Username + if username == "" { + username = "<user>" + } + return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository to <user>/<repo> (ex: %s/%s)", username, repoInfo.LocalName) + } + + if isTrusted() { + return cli.trustedPush(repoInfo, tag, authConfig) + } + + v := url.Values{} + v.Set("tag", tag) + + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push") + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go new file mode 100644 index 00000000..ae09a462 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go @@ -0,0 +1,27 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRename renames a container. +// +// Usage: docker rename OLD_NAME NEW_NAME +func (cli *DockerCli) CmdRename(args ...string) error { + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, "Rename a container", true) + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + oldName := cmd.Arg(0) + newName := cmd.Arg(1) + + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", oldName, newName), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go new file mode 100644 index 00000000..88de4f75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go @@ -0,0 +1,39 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRestart restarts one or more running containers. +// +// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, "Restart a running container", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var errNames []string + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to restart containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go new file mode 100644 index 00000000..5766727a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go @@ -0,0 +1,55 @@ +package client + +import ( + "fmt" + "net/url" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...]
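+// For example, "docker rm -f -v web" (container name hypothetical) force-removes a running container together with its volumes.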
+func (cli *DockerCli) CmdRm(args ...string) error { + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, "Remove one or more containers", true) + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + val := url.Values{} + if *v { + val.Set("v", "1") + } + if *link { + val.Set("link", "1") + } + + if *force { + val.Set("force", "1") + } + + var errNames []string + for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + name = strings.Trim(name, "/") + + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to remove containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go new file mode 100644 index 00000000..25d5646e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go @@ -0,0 +1,61 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRmi removes all images with the specified name(s). +// +// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
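+// For example, "docker rmi -f ubuntu:14.04" forces removal of that tag; each deletion or untagging is echoed below.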
+func (cli *DockerCli) CmdRmi(args ...string) error { + cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, "Remove one or more images", true) + force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var errNames []string + for _, name := range cmd.Args() { + serverResp, err := cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + defer serverResp.body.Close() + + dels := []types.ImageDelete{} + if err := json.NewDecoder(serverResp.body).Decode(&dels); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + continue + } + + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) + } + } + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to remove images: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go new file mode 100644 index 00000000..50fccfa3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go @@ -0,0 +1,257 @@ +package client + +import ( + "fmt" + "io" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/resolvconf/dns" +) + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +// CmdRun runs a command in a new container. +// +// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
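+// For example, "docker run -d --name web nginx" (names illustrative) creates a container, starts it detached, and prints its ID.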
+func (cli *DockerCli) CmdRun(args ...string) error { + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, "Run a command in a new container", true) + addTrustedFlags(cmd, true) + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + flAttach *opts.ListOpts + + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ) + + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) + // just in case the Parse does not exit + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + + if len(hostConfig.DNS) > 0 { + // check the DNS settings passed via --dns against + // localhost regexp to warn if they are trying to + // set a DNS to a localhost address + for _, dnsIP := range hostConfig.DNS { + if dns.IsLocalhost(dnsIP) { + fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + break + } + } + } + if config.Image == "" { + cmd.Usage() + return nil + } + + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := cmd.Lookup("-attach"); fl != nil { + flAttach = fl.Value.(*opts.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + if *flAutoRemove { + return ErrConflictDetachAutoRemove + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable flSigProxy when in TTY mode + sigProxy := *flSigProxy + if config.Tty { + sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. + if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() + } + + createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + if sigProxy { + sigc := cli.forwardAllSignals(createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(cli.out, "%s\n", createResponse.ID) + }() + } + if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { + return ErrConflictRestartPolicyAndAutoRemove + } + // We need to instantiate the chan because the select needs it. It can + // be closed but can't be uninitialized. 
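+ // (A receive from a nil channel would block forever, so the non-attached path below closes the channel instead of leaving it nil.)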
+ hijacked := make(chan io.Closer) + // Block the return until the chan gets closed + defer func() { + logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)") + } + }() + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + v = url.Values{} + ) + v.Set("stream", "1") + if config.AttachStdin { + v.Set("stdin", "1") + in = cli.in + } + if config.AttachStdout { + v.Set("stdout", "1") + out = cli.out + } + if config.AttachStderr { + v.Set("stderr", "1") + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + errCh = promise.Go(func() error { + return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil) + }) + } else { + close(hijacked) + } + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-errCh: + if err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + defer func() { + if *flAutoRemove { + if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil { + fmt.Fprintf(cli.err, "Error deleting container: %s\n", err) + } + } + }() + + //start the container + if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil { + return err + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
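+ // (waitDisplayID is closed by the goroutine above once the container ID has been printed.)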
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + var status int + + // Attached mode + if *flAutoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil { + return err + } + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } else { + // No Autoremove: Simply retrieve the exit code + if !config.Tty { + // In non-TTY mode, we can't detach, so we must wait for container exit + if status, err = waitForExit(cli, createResponse.ID); err != nil { + return err + } + } else { + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go new file mode 100644 index 00000000..ee19d776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go @@ -0,0 +1,57 @@ +package client + +import ( + "errors" + "net/url" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdSave saves one or more images to a tar archive. +// +// The tar archive is written to STDOUT by default, or written to a file. +// +// Usage: docker save [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, "Save an image(s) to a tar archive (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + output = cli.out + err error + ) + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } else if cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + sopts := &streamOpts{ + rawTerminal: true, + out: output, + } + + if len(cmd.Args()) == 1 { + image := cmd.Arg(0) + if _, err := cli.stream("GET", "/images/"+image+"/get", sopts); err != nil { + return err + } + } else { + v := url.Values{} + for _, arg := range cmd.Args() { + v.Add("names", arg) + } + if _, err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go new file mode 100644 index 00000000..2305d083 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go @@ -0,0 +1,87 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + "text/tabwriter" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" +) + +// ByStars sorts search results in ascending order by number of stars.
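+// It implements sort.Interface; CmdSearch sorts it with sort.Reverse to list the most-starred results first.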
+type ByStars []registry.SearchResult + +func (r ByStars) Len() int { return len(r) } +func (r ByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r ByStars) Less(i, j int) bool { return r[i].StarCount < r[j].StarCount } + +// CmdSearch searches the Docker Hub for images. +// +// Usage: docker search [OPTIONS] TERM +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := Cli.Subcmd("search", []string{"TERM"}, "Search the Docker Hub for images", true) + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Uint([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + v := url.Values{} + v.Set("term", name) + + // Resolve the Repository name from fqn to hostname + name + taglessRemote, _ := parsers.ParseRepositoryTag(name) + repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + if err != nil { + return err + } + + rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search") + if err != nil { + return err + } + + defer rdr.Close() + + results := ByStars{} + if err := json.NewDecoder(rdr).Decode(&results); err != nil { + return err + } + + sort.Sort(sort.Reverse(results)) + + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + if ((*automated || *trusted) && (!res.IsTrusted && !res.IsAutomated)) || (int(*stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = stringutils.Truncate(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated || res.IsTrusted { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/service.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/service.go new file mode 100644 index 00000000..9f0b1fcf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/service.go @@ -0,0 +1,17 @@ +// +build experimental + +package client + +import ( + "os" + + nwclient "github.com/docker/libnetwork/client" +) + +// CmdService is used to manage network services. +// service command is user to publish, attach and list a service from a container. +func (cli *DockerCli) CmdService(args ...string) error { + nCli := nwclient.NewNetworkCli(cli.out, cli.err, nwclient.CallFunc(cli.callWrapper)) + args = append([]string{"service"}, args...) + return nCli.Cmd(os.Args[0], args...) 
+} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go new file mode 100644 index 00000000..e039df02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go @@ -0,0 +1,170 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" +) + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +// CmdStart starts one or more stopped containers. +// +// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, "Start one or more stopped containers", true) + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + cErr chan error + tty bool + ) + + if *attach || *openStdin { + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return err + } + + tty = c.Config.Tty + + if !tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + + if *openStdin && c.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + hijacked := make(chan io.Closer) + // Block the return until the chan gets closed + defer func() { + logrus.Debugf("CmdStart() returned, defer waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)") + } + cli.in.Close() + }() + cErr = promise.Go(func() error { + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) + }) + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-cErr: + if err != nil { + return err + } + } + } + + var encounteredError error + var errNames []string + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil)) + if err != nil { + if !*attach && !*openStdin { + 
// attach and openStdin is false means it could be starting multiple containers + // when a container start failed, show the error message and start next + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + encounteredError = err + } + } else { + if !*attach && !*openStdin { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + } + + if len(errNames) > 0 { + encounteredError = fmt.Errorf("Error: failed to start containers: %v", errNames) + } + if encounteredError != nil { + return encounteredError + } + + if *openStdin || *attach { + if tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go new file mode 100644 index 00000000..1feb1e11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go @@ -0,0 +1,202 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "sort" + "strings" + "sync" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/units" +) + +type containerStats struct { + Name string + CPUPercentage float64 + Memory float64 + MemoryLimit float64 + MemoryPercentage float64 + NetworkRx float64 + NetworkTx float64 + mu sync.RWMutex + err error +} + +func (s *containerStats) Collect(cli *DockerCli, streamStats bool) { + v := url.Values{} + if streamStats { + v.Set("stream", "1") + } else { + v.Set("stream", "0") + } + serverResp, err := cli.call("GET", "/containers/"+s.Name+"/stats?"+v.Encode(), nil, nil) + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + + defer serverResp.body.Close() + + var ( + previousCPU uint64 + previousSystem uint64 + dec = json.NewDecoder(serverResp.body) + u = make(chan error, 1) + ) + go func() { + for { + var v *types.Stats + if err := dec.Decode(&v); err != nil { + u <- err + return + } + var ( + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + cpuPercent = 0.0 + ) + previousCPU = v.PreCpuStats.CpuUsage.TotalUsage + previousSystem = v.PreCpuStats.SystemUsage + cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) + s.mu.Lock() + s.CPUPercentage = cpuPercent + s.Memory = float64(v.MemoryStats.Usage) + s.MemoryLimit = float64(v.MemoryStats.Limit) + s.MemoryPercentage = memPercent + s.NetworkRx = float64(v.Network.RxBytes) + s.NetworkTx = float64(v.Network.TxBytes) + s.mu.Unlock() + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. 
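Collect pairs a decoding goroutine with a two-second watchdog: each decoded sample arrives on u, and if nothing arrives in time the displayed values are zeroed rather than left stale. A reduced, runnable sketch of that select loop; the producer here just simulates a slow decoder:

package main

import (
	"fmt"
	"time"
)

func main() {
	u := make(chan error, 1)
	go func() { // stand-in for the JSON-decoding goroutine
		for {
			time.Sleep(3 * time.Second) // slower than the watchdog
			u <- nil
		}
	}()
	for i := 0; i < 3; i++ {
		select {
		case <-time.After(2 * time.Second):
			fmt.Println("no sample within 2s: zero out the stats")
		case err := <-u:
			fmt.Println("fresh sample, err =", err)
		}
	}
}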
+ s.mu.Lock() + s.CPUPercentage = 0 + s.Memory = 0 + s.MemoryPercentage = 0 + s.mu.Unlock() + case err := <-u: + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + } + if !streamStats { + return + } + } +} + +func (s *containerStats) Display(w io.Writer) error { + s.mu.RLock() + defer s.mu.RUnlock() + if s.err != nil { + return s.err + } + fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n", + s.Name, + s.CPUPercentage, + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), + s.MemoryPercentage, + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx)) + return nil +} + +// CmdStats displays a live stream of resource usage statistics for one or more containers. +// +// This shows real-time information on CPU usage, memory usage, and network I/O. +// +// Usage: docker stats CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStats(args ...string) error { + cmd := Cli.Subcmd("stats", []string{"CONTAINER [CONTAINER...]"}, "Display a live stream of one or more containers' resource usage statistics", true) + noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + names := cmd.Args() + sort.Strings(names) + var ( + cStats []*containerStats + w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + ) + printHeader := func() { + if !*noStream { + fmt.Fprint(cli.out, "\033[2J") + fmt.Fprint(cli.out, "\033[H") + } + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n") + } + for _, n := range names { + s := &containerStats{Name: n} + cStats = append(cStats, s) + go s.Collect(cli, !*noStream) + } + // do a quick pause so that any failed connections for containers that do not exist are able to be + // evicted before we display the initial or default values. + time.Sleep(1500 * time.Millisecond) + var errs []string + for _, c := range cStats { + c.mu.Lock() + if c.err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) + } + c.mu.Unlock() + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + for range time.Tick(500 * time.Millisecond) { + printHeader() + toRemove := []int{} + for i, s := range cStats { + if err := s.Display(w); err != nil && !*noStream { + toRemove = append(toRemove, i) + } + } + for j := len(toRemove) - 1; j >= 0; j-- { + i := toRemove[j] + cStats = append(cStats[:i], cStats[i+1:]...) 
+ } + if len(cStats) == 0 { + return nil + } + w.Flush() + if *noStream { + break + } + } + return nil +} + +func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.Stats) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CpuStats.SystemUsage - previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stats_unit_test.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats_unit_test.go new file mode 100644 index 00000000..0831dbcb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats_unit_test.go @@ -0,0 +1,29 @@ +package client + +import ( + "bytes" + "sync" + "testing" +) + +func TestDisplay(t *testing.T) { + c := &containerStats{ + Name: "app", + CPUPercentage: 30.0, + Memory: 100 * 1024 * 1024.0, + MemoryLimit: 2048 * 1024 * 1024.0, + MemoryPercentage: 100.0 / 2048.0 * 100.0, + NetworkRx: 100 * 1024 * 1024, + NetworkTx: 800 * 1024 * 1024, + mu: sync.RWMutex{}, + } + var b bytes.Buffer + if err := c.Display(&b); err != nil { + t.Fatalf("c.Display() gave error: %s", err) + } + got := b.String() + want := "app\t30.00%\t104.9 MB/2.147 GB\t4.88%\t104.9 MB/838.9 MB\n" + if got != want { + t.Fatalf("c.Display() = %q, want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go new file mode 100644 index 00000000..b7348a7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdStop stops one or more running containers. +// +// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
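calculateCPUPercent normalizes the container's CPU delta by the whole system's delta and scales by core count, so a worked example (numbers invented): if the container burned 2e9 ns of CPU between samples while the system burned 8e9 ns across 4 cores, usage is (2e9/8e9) * 4 * 100 = 100%, i.e. one full core:

package main

import "fmt"

func main() {
	var (
		cpuDelta    = 2e9 // container CPU ns between samples (invented)
		systemDelta = 8e9 // system-wide CPU ns between samples (invented)
		ncpu        = 4.0 // len(v.CpuStats.CpuUsage.PercpuUsage)
	)
	fmt.Printf("%.1f%%\n", cpuDelta/systemDelta*ncpu*100) // 100.0%
}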
+func (cli *DockerCli) CmdStop(args ...string) error { + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var errNames []string + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to stop containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go new file mode 100644 index 00000000..454c7ec5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go @@ -0,0 +1,42 @@ +package client + +import ( + "net/url" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" +) + +// CmdTag tags an image into a repository. +// +// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, "Tag an image into a repository", true) + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + var ( + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + v = url.Values{} + ) + + //Check if the given image name can be resolved + if err := registry.ValidateRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) + v.Set("tag", tag) + + if *force { + v.Set("force", "1") + } + + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, nil)); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go new file mode 100644 index 00000000..c9934fe0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go @@ -0,0 +1,49 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + "text/tabwriter" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdTop displays the running processes of a container. 
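The grace period travels to the daemon as the t query parameter, so the per-container request CmdStop issues looks like the one sketched here (the container name is invented):

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	v := url.Values{}
	v.Set("t", strconv.Itoa(10)) // the default -t/--time grace period
	fmt.Println("/containers/web/stop?" + v.Encode()) // /containers/web/stop?t=10
}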
+// +// Usage: docker top CONTAINER +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, "Display the running processes of a container", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + val := url.Values{} + if cmd.NArg() > 1 { + val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) + } + + serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + procList := types.ContainerProcessList{} + if err := json.NewDecoder(serverResp.body).Decode(&procList); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go new file mode 100644 index 00000000..b07cb79d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go @@ -0,0 +1,435 @@ +package client + +import ( + "bufio" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/ansiescape" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/pkg/passphrase" + "github.com/docker/notary/trustmanager" + "github.com/endophage/gotuf/data" +) + +var untrusted bool + +func addTrustedFlags(fs *flag.FlagSet, verify bool) { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + message := "Skip image signing" + if verify { + message = "Skip image verification" + } + fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message) +} + +func isTrusted() bool { + return !untrusted +} + +var targetRegexp = regexp.MustCompile(`([\S]+): digest: ([\S]+) size: ([\d]+)`) + +type target struct { + reference registry.Reference + digest digest.Digest + size int64 +} + +func (cli *DockerCli) trustDirectory() string { + return filepath.Join(cliconfig.ConfigDir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. 
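CmdTop leans on text/tabwriter for column alignment: the titles and each process row are tab-joined, and the writer pads the columns on Flush. The same layout parameters in isolation, with invented rows:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	// Same parameters as CmdTop: minwidth 20, tabwidth 1, padding 3, space pad.
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, strings.Join([]string{"PID", "USER", "COMMAND"}, "\t"))
	fmt.Fprintln(w, strings.Join([]string{"1", "root", "sleep 1000"}, "\t"))
	w.Flush()
}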
+func (cli *DockerCli) certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil +} + +func trustServer(index *registry.IndexInfo) string { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + if !strings.HasPrefix(s, "https://") { + return "https://" + s + } + return s + } + if index.Official { + return registry.NotaryServer + } + return "https://" + index.Name +} + +type simpleCredentialStore struct { + auth cliconfig.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig cliconfig.AuthConfig) (*client.NotaryRepository, error) { + server := trustServer(repoInfo.Index) + if !strings.HasPrefix(server, "https://") { + return nil, errors.New("unsupported scheme: https required for trust server") + } + + var cfg = tlsconfig.ClientDefault + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := cli.certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.DockerHeaders(http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) + pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + challengeManager := auth.NewSimpleChallengeManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + + creds := simpleCredentialStore{auth: authConfig} + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.CanonicalName, "push", "pull") + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) + tr := transport.NewTransport(base, modifiers...) 
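trustServer resolves the notary endpoint in three steps: an explicit DOCKER_CONTENT_TRUST_SERVER wins (gaining an https:// prefix if it lacks one), the official index falls back to registry.NotaryServer, and any other index gets https:// plus its name. A standalone sketch; the notaryServer constant is an assumption standing in for registry.NotaryServer:

package main

import (
	"fmt"
	"os"
	"strings"
)

const notaryServer = "https://notary.docker.io" // assumed default

func trustServer(indexName string, official bool) string {
	if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
		if !strings.HasPrefix(s, "https://") {
			return "https://" + s
		}
		return s
	}
	if official {
		return notaryServer
	}
	return "https://" + indexName
}

func main() {
	fmt.Println(trustServer("registry.example.com:5000", false))
}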
+ + return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.CanonicalName, server, tr, cli.getPassphraseRetriever()) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + reference: registry.ParseReference(t.Name), + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { + baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_TARGET_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_SNAPSHOT_PASSPHRASE"), + } + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +func (cli *DockerCli) trustedReference(repo string, ref registry.Reference) (registry.Reference, error) { + repoInfo, err := registry.ParseRepositoryInfo(repo) + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) + + notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.String()) + if err != nil { + return nil, err + } + r, err := convertTarget(*t) + if err != nil { + return nil, err + + } + + return registry.DigestReference(r.digest), nil +} + +func (cli *DockerCli) tagTrusted(repoInfo *registry.RepositoryInfo, trustedRef, ref registry.Reference) error { + fullName := trustedRef.ImageName(repoInfo.LocalName) + fmt.Fprintf(cli.out, "Tagging %s as %s\n", fullName, ref.ImageName(repoInfo.LocalName)) + tv := url.Values{} + tv.Set("repo", repoInfo.LocalName) + tv.Set("tag", ref.String()) + tv.Set("force", "1") + + if _, _, err := readBody(cli.call("POST", "/images/"+fullName+"/tag?"+tv.Encode(), nil, nil)); err != nil { + return err + } + + return nil +} + +func notaryError(err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return errors.New("no trust data available for remote repository") + case client.ErrExpired: + return fmt.Errorf("remote repository out-of-date: %v", err) + case trustmanager.ErrKeyNotFound: + return fmt.Errorf("signing keys not found: %v", err) + } + + return err +} + +func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig cliconfig.AuthConfig) error { + var ( + v = url.Values{} + refs = []target{} + ) + + notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) + return err + } + + if ref.String() == "" { + // List all targets + targets, err := notaryRepo.ListTargets() + if err != nil { + return notaryError(err) + } + for _, tgt := range targets { + t, err := convertTarget(*tgt) + if err != nil { + fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.LocalName) + continue + } + refs = append(refs, t) + } + } else { + t, err := notaryRepo.GetTargetByName(ref.String()) + if err != nil { + return 
notaryError(err) + } + r, err := convertTarget(*t) + if err != nil { + return err + + } + refs = append(refs, r) + } + + v.Set("fromImage", repoInfo.LocalName) + for i, r := range refs { + displayTag := r.reference.String() + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.LocalName, displayTag, r.digest) + v.Set("tag", r.digest.String()) + + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull") + if err != nil { + return err + } + + // If reference is not trusted, tag by trusted reference + if !r.reference.HasDigest() { + if err := cli.tagTrusted(repoInfo, registry.DigestReference(r.digest), r.reference); err != nil { + return err + + } + } + } + return nil +} + +func targetStream(in io.Writer) (io.WriteCloser, <-chan []target) { + r, w := io.Pipe() + out := io.MultiWriter(in, w) + targetChan := make(chan []target) + + go func() { + targets := []target{} + scanner := bufio.NewScanner(r) + scanner.Split(ansiescape.ScanANSILines) + for scanner.Scan() { + line := scanner.Bytes() + if matches := targetRegexp.FindSubmatch(line); len(matches) == 4 { + dgst, err := digest.ParseDigest(string(matches[2])) + if err != nil { + // Line does match what is expected, continue looking for valid lines + logrus.Debugf("Bad digest value %q in matched line, ignoring\n", string(matches[2])) + continue + } + s, err := strconv.ParseInt(string(matches[3]), 10, 64) + if err != nil { + // Line does match what is expected, continue looking for valid lines + logrus.Debugf("Bad size value %q in matched line, ignoring\n", string(matches[3])) + continue + } + + targets = append(targets, target{ + reference: registry.ParseReference(string(matches[1])), + digest: dgst, + size: s, + }) + } + } + targetChan <- targets + }() + + return ioutils.NewWriteCloserWrapper(out, w.Close), targetChan +} + +func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, authConfig cliconfig.AuthConfig) error { + streamOut, targetChan := targetStream(cli.out) + + v := url.Values{} + v.Set("tag", tag) + + _, _, err := cli.clientRequestAttemptLogin("POST", "/images/"+repoInfo.LocalName+"/push?"+v.Encode(), nil, streamOut, repoInfo.Index, "push") + // Close stream channel to finish target parsing + if err := streamOut.Close(); err != nil { + return err + } + // Check error from request + if err != nil { + return err + } + + // Get target results + targets := <-targetChan + + if tag == "" { + fmt.Fprintf(cli.out, "No tag specified, skipping trust metadata push\n") + return nil + } + if len(targets) == 0 { + fmt.Fprintf(cli.out, "No targets found, skipping trust metadata push\n") + return nil + } + + fmt.Fprintf(cli.out, "Signing and pushing trust metadata\n") + + repo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) + return err + } + + for _, target := range targets { + h, err := hex.DecodeString(target.digest.Hex()) + if err != nil { + return err + } + t := &client.Target{ + Name: target.reference.String(), + Hashes: data.Hashes{ + string(target.digest.Algorithm()): h, + }, + Length: int64(target.size), + } + if err := repo.AddTarget(t); err != nil { + return err + } + } + + err = repo.Publish() + if _, ok := err.(*client.ErrRepoNotInitialized); !ok { + return notaryError(err) + } + + ks := repo.KeyStoreManager + keys := ks.RootKeyStore().ListKeys() + var rootKey 
string + + if len(keys) == 0 { + rootKey, err = ks.GenRootKey("ecdsa") + if err != nil { + return err + } + } else { + // TODO(dmcgowan): let user choose + rootKey = keys[0] + } + + cryptoService, err := ks.GetRootCryptoService(rootKey) + if err != nil { + return err + } + + if err := repo.Initialize(cryptoService); err != nil { + return notaryError(err) + } + fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.CanonicalName) + + return notaryError(repo.Publish()) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go new file mode 100644 index 00000000..cd1e6766 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdUnpause unpauses all processes within a container, for one or more containers. +// +// Usage: docker unpause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, "Unpause all processes within a container", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to unpause containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go new file mode 100644 index 00000000..8f822155 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go @@ -0,0 +1,379 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + gosignal "os/signal" + "runtime" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" +) + +var ( + errConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") +) + +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// HTTPClient creates a new HTTP client with the cli's client transport instance. 
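The push path above scrapes its own progress output: targetStream scans each ANSI-split line against targetRegexp and collects the tag, digest and size triples that the signing step turns into notary targets. The parsing on its own, with an invented sample line:

package main

import (
	"fmt"
	"regexp"
)

var targetRegexp = regexp.MustCompile(`([\S]+): digest: ([\S]+) size: ([\d]+)`)

func main() {
	line := "latest: digest: sha256:4a5b6c size: 1337"
	if m := targetRegexp.FindStringSubmatch(line); len(m) == 4 {
		fmt.Printf("tag=%s digest=%s size=%s\n", m[1], m[2], m[3])
	}
}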
+func (cli *DockerCli) HTTPClient() *http.Client { + return &http.Client{Transport: cli.transport} +} + +func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers map[string][]string) (*serverResponse, error) { + + serverResp := &serverResponse{ + body: nil, + statusCode: -1, + } + + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && in == nil { + in = bytes.NewReader([]byte{}) + } + req, err := http.NewRequest(method, fmt.Sprintf("%s/v%s%s", cli.basePath, api.Version, path), in) + if err != nil { + return serverResp, err + } + + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.configFile.HTTPHeaders { + req.Header.Set(k, v) + } + + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")") + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + + resp, err := cli.HTTPClient().Do(req) + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return serverResp, errConnectionRefused + } + + if cli.tlsConfig == nil { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?\n* Is your docker daemon up and running?", err) + } + if cli.tlsConfig != nil && strings.Contains(err.Error(), "remote error: bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) { + cmdAttempt := func(authConfig cliconfig.AuthConfig) (io.ReadCloser, int, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, -1, err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + // begin the request + serverResp, err := cli.clientRequest(method, path, in, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + if err == nil && out != nil { + // If we are streaming output, complete the stream since + // errors may not appear until later. 
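Registry credentials ride along as the X-Registry-Auth header: the auth config is JSON-marshalled, then base64url-encoded. The encoding in isolation; the struct and values here are illustrative stand-ins for cliconfig.AuthConfig:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// authConfig stands in for cliconfig.AuthConfig; the field tags are illustrative.
type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

func main() {
	buf, err := json.Marshal(authConfig{Username: "jdoe", Password: "hunter2"})
	if err != nil {
		panic(err)
	}
	// This is the value placed in the X-Registry-Auth request header.
	fmt.Println(base64.URLEncoding.EncodeToString(buf))
}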
+ err = cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), true, out, nil) + } + if err != nil { + // Since errors in a stream appear after status 200 has been written, + // we may need to change the status code. + if strings.Contains(err.Error(), "Authentication is required") || + strings.Contains(err.Error(), "Status 401") || + strings.Contains(err.Error(), "401 Unauthorized") || + strings.Contains(err.Error(), "status code 401") { + serverResp.statusCode = http.StatusUnauthorized + } + } + return serverResp.body, serverResp.statusCode, err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile, index) + body, statusCode, err := cmdAttempt(authConfig) + if statusCode == http.StatusUnauthorized { + fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) + if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil { + return nil, -1, err + } + authConfig = registry.ResolveAuthConfig(cli.configFile, index) + return cmdAttempt(authConfig) + } + return body, statusCode, err +} + +func (cli *DockerCli) callWrapper(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, http.Header, int, error) { + sr, err := cli.call(method, path, data, headers) + return sr.body, sr.header, sr.statusCode, err +} + +func (cli *DockerCli) call(method, path string, data interface{}, headers map[string][]string) (*serverResponse, error) { + params, err := cli.encodeData(data) + if err != nil { + sr := &serverResponse{ + body: nil, + header: nil, + statusCode: -1, + } + return sr, nil + } + + if data != nil { + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + } + + serverResp, err := cli.clientRequest(method, path, params, headers) + return serverResp, err +} + +type streamOpts struct { + rawTerminal bool + in io.Reader + out io.Writer + err io.Writer + headers map[string][]string +} + +func (cli *DockerCli) stream(method, path string, opts *streamOpts) (*serverResponse, error) { + serverResp, err := cli.clientRequest(method, path, opts.in, opts.headers) + if err != nil { + return serverResp, err + } + return serverResp, cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), opts.rawTerminal, opts.out, opts.err) +} + +func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, rawTerminal bool, stdout, stderr io.Writer) error { + defer body.Close() + + if api.MatchesContentType(contentType, "application/json") { + return jsonmessage.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut) + } + if stdout != nil || stderr != nil { + // When TTY is ON, use regular copy + var err error + if rawTerminal { + _, err = io.Copy(stdout, body) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, body) + } + logrus.Debugf("[stream] End of stdout") + return err + } + return nil +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + if height == 0 && width == 0 { + return + } + v := url.Values{} + v.Set("h", strconv.Itoa(height)) + v.Set("w", strconv.Itoa(width)) + + path := "" + if !isExec { + path = "/containers/" + id + "/resize?" + } else { + path = "/exec/" + id + "/resize?" 
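On unix, the TTY size is kept in sync by watching SIGWINCH: monitorTtySize (below) registers the signal and calls resizeTty on every delivery, while Windows falls back to polling. A minimal sketch of the unix path:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGWINCH)
	fmt.Println("resize the terminal (Ctrl-C to quit)")
	for range sigchan {
		// The real client would POST /containers/<id>/resize?h=..&w=.. here.
		fmt.Println("SIGWINCH received: would resize the remote TTY")
	}
}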
+ } + + if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, nil)); err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +func waitForExit(cli *DockerCli, containerID string) (int, error) { + serverResp, err := cli.call("POST", "/containers/"+containerID+"/wait", nil, nil) + if err != nil { + return -1, err + } + + defer serverResp.body.Close() + + var res types.ContainerWaitResponse + if err := json.NewDecoder(serverResp.body).Decode(&res); err != nil { + return -1, err + } + + return res.StatusCode, nil +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { + serverResp, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != errConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + defer serverResp.body.Close() + + var c types.ContainerJSON + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return false, -1, err + } + + return c.State.Running, c.State.ExitCode, nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { + serverResp, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != errConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + defer serverResp.body.Close() + + //TODO: Should we reconsider having a type in api/types? + //this is a response to exex/id/json not container + var c struct { + Running bool + ExitCode int + } + + if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil { + return false, -1, err + } + + return c.Running, c.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.getTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.getTtySize() + + if prevW != w || prevH != h { + cli.resizeTty(id, isExec) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + cli.resizeTty(id, isExec) + } + }() + } + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func readBody(serverResp *serverResponse, err error) ([]byte, int, error) { + if serverResp.body != nil { + defer serverResp.body.Close() + } + if err != nil { + return nil, serverResp.statusCode, err + } + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return nil, -1, err + } + return body, serverResp.statusCode, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go new file mode 100644 index 00000000..2f1dba07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go @@ -0,0 +1,96 @@ +package client + +import ( + "encoding/json" + "runtime" + "text/template" + + "github.com/docker/docker/api" + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.ApiVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} + Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.ApiVersion}} + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} + Experimental: {{.Server.Experimental}}{{end}}{{end}}` + +type versionData struct { + Client types.Version + ServerOK bool + Server types.Version +} + +// CmdVersion shows Docker version information. +// +// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. +// +// Usage: docker version +func (cli *DockerCli) CmdVersion(args ...string) (err error) { + cmd := Cli.Subcmd("version", nil, "Show the Docker version information.", true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + if *tmplStr == "" { + *tmplStr = versionTemplate + } + + var tmpl *template.Template + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := versionData{ + Client: types.Version{ + Version: dockerversion.VERSION, + ApiVersion: api.Version, + GoVersion: runtime.Version(), + GitCommit: dockerversion.GITCOMMIT, + BuildTime: dockerversion.BUILDTIME, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: utils.ExperimentalBuild(), + }, + } + + defer func() { + if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { + err = err2 + } + cli.out.Write([]byte{'\n'}) + }() + + serverResp, err := cli.call("GET", "/version", nil, nil) + if err != nil { + return err + } + + defer serverResp.body.Close() + + if err = json.NewDecoder(serverResp.body).Decode(&vd.Server); err != nil { + return Cli.StatusError{StatusCode: 1, + Status: "Error reading remote version: " + err.Error()} + } + + vd.ServerOK = true + + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go new file mode 100644 index 00000000..829a320c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdWait blocks until a container stops, then prints its exit code. +// +// If more than one container is specified, this will wait synchronously on each container. +// +// Usage: docker wait CONTAINER [CONTAINER...] 
+func (cli *DockerCli) CmdWait(args ...string) error { + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, "Block until a container stops, then print its exit code.", true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errNames []string + for _, name := range cmd.Args() { + status, err := waitForExit(cli, name) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + errNames = append(errNames, name) + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + if len(errNames) > 0 { + return fmt.Errorf("Error: failed to wait containers: %v", errNames) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/common.go b/Godeps/_workspace/src/github.com/docker/docker/api/common.go new file mode 100644 index 00000000..d31c557e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/common.go @@ -0,0 +1,133 @@ +package api + +import ( + "fmt" + "mime" + "path/filepath" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/version" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // Current REST API version + Version version.Version = "1.21" + + // Minimun REST API version supported + MinVersion version.Version = "1.12" + + // Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +type ByPrivatePort []types.Port + +func (r ByPrivatePort) Len() int { return len(r) } +func (r ByPrivatePort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r ByPrivatePort) Less(i, j int) bool { return r[i].PrivatePort < r[j].PrivatePort } + +func DisplayablePorts(ports []types.Port) string { + var ( + result = []string{} + hostMappings = []string{} + firstInGroupMap map[string]int + lastInGroupMap map[string]int + ) + firstInGroupMap = make(map[string]int) + lastInGroupMap = make(map[string]int) + sort.Sort(ByPrivatePort(ports)) + for _, port := range ports { + var ( + current = port.PrivatePort + portKey = port.Type + firstInGroup int + lastInGroup int + ) + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + firstInGroup = firstInGroupMap[portKey] + lastInGroup = lastInGroupMap[portKey] + + if firstInGroup == 0 { + firstInGroupMap[portKey] = current + lastInGroupMap[portKey] = current + continue + } + + if current == (lastInGroup + 1) { + lastInGroupMap[portKey] = current + continue + } + result = append(result, FormGroup(portKey, firstInGroup, lastInGroup)) + firstInGroupMap[portKey] = current + lastInGroupMap[portKey] = current + } + for portKey, firstInGroup := range firstInGroupMap { + result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey])) + } + result = append(result, hostMappings...) 
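DisplayablePorts compresses runs of consecutive private ports of one type into ranges via FormGroup (next hunk). A simplified sketch of the grouping output, ignoring the per-IP handling:

package main

import "fmt"

func formGroup(groupType string, start, last int) string {
	if start == last {
		return fmt.Sprintf("%d/%s", start, groupType)
	}
	return fmt.Sprintf("%d-%d/%s", start, last, groupType)
}

func main() {
	fmt.Println(formGroup("tcp", 8080, 8080)) // 8080/tcp
	fmt.Println(formGroup("tcp", 3000, 3005)) // 3000-3005/tcp
}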
+ return strings.Join(result, ", ") +} + +func FormGroup(key string, start, last int) string { + var ( + group string + parts = strings.Split(key, "/") + groupType = parts[0] + ip = "" + ) + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + if start == last { + group = fmt.Sprintf("%d", start) + } else { + group = fmt.Sprintf("%d-%d", start, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/form.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/form.go new file mode 100644 index 00000000..6a8387a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/form.go @@ -0,0 +1,56 @@ +package server + +import ( + "fmt" + "net/http" + "strconv" + "strings" +) + +func boolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// boolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func boolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return boolValue(r, k) +} + +func int64ValueOrZero(r *http.Request, k string) int64 { + val, err := strconv.ParseInt(r.FormValue(k), 10, 64) + if err != nil { + return 0 + } + return val +} + +type archiveOptions struct { + name string + path string +} + +func archiveFormValues(r *http.Request, vars map[string]string) (archiveOptions, error) { + if vars == nil { + return archiveOptions{}, fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return archiveOptions{}, err + } + + name := vars["name"] + path := r.Form.Get("path") + + switch { + case name == "": + return archiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return archiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return archiveOptions{name, path}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/form_test.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/form_test.go new file mode 100644 index 00000000..5b3bd718 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/form_test.go @@ -0,0 +1,70 @@ +package server + +import ( + "net/http" + "net/url" + 
"testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := boolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !boolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if boolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 00000000..766462bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,38 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +func profilerSetup(mainRouter *mux.Router, path string) { + var r = mainRouter.PathPrefix(path).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. 
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go new file mode 100644 index 00000000..19ebc550 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go @@ -0,0 +1,1727 @@ +package server + +import ( + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "runtime" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + "golang.org/x/net/websocket" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/builder" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + daemon *daemon.Daemon + cfg *Config + router *mux.Router + start chan struct{} + servers []serverCloser +} + +// New returns a new instance of the server based on the specified configuration. +func New(cfg *Config) *Server { + srv := &Server{ + cfg: cfg, + start: make(chan struct{}), + } + r := createRouter(srv) + srv.router = r + return srv +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +type serverCloser interface { + Serve() error + Close() error +} + +// ServeAPI loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. +func (s *Server) ServeAPI(protoAddrs []string) error { + var chErrors = make(chan error, len(protoAddrs)) + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format, expected PROTO://ADDR") + } + srv, err := s.newServer(protoAddrParts[0], protoAddrParts[1]) + if err != nil { + return err + } + s.servers = append(s.servers, srv...) 
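ServeAPI accepts each listener as a PROTO://ADDR string and splits it exactly once on "://", rejecting malformed entries up front. The parsing on its own, with two typical values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, protoAddr := range []string{"unix:///var/run/docker.sock", "tcp://0.0.0.0:2375"} {
		parts := strings.SplitN(protoAddr, "://", 2)
		if len(parts) != 2 {
			fmt.Println("bad format, expected PROTO://ADDR")
			continue
		}
		fmt.Printf("proto=%s addr=%s\n", parts[0], parts[1])
	}
}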
+ + for _, s := range srv { + logrus.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + go func(s serverCloser) { + if err := s.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(s) + } + } + + for i := 0; i < len(protoAddrs); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create a http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +// HTTPAPIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be register as a API endpoint (e.g. getVersion). +type HTTPAPIFunc func(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +func closeStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// checkForJSON makes sure that the request's Content-Type is application/json. +func checkForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +//If we don't do this, POST method without Content-type (even with empty body) will fail +func parseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func parseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func httpError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + statusCode := http.StatusInternalServerError + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, we should + // create appropriate error types with clearly defined meaning. 
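closeStreams prefers a half-close when the stream supports it: an inline CloseWrite type assertion catches TCP-like connections so the peer sees EOF while the read side stays usable, and everything else is fully closed. The assertion pattern on its own; net.Pipe conns lack CloseWrite, so this sketch falls through to Close:

package main

import (
	"fmt"
	"io"
	"net"
)

func closeStreams(streams ...interface{}) {
	for _, stream := range streams {
		if tcpc, ok := stream.(interface{ CloseWrite() error }); ok {
			fmt.Println("half-closing write side")
			tcpc.CloseWrite()
		} else if closer, ok := stream.(io.Closer); ok {
			fmt.Println("closing stream")
			closer.Close()
		}
	}
}

func main() {
	c, s := net.Pipe()
	defer s.Close()
	closeStreams(c) // prints "closing stream"
}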
+ errStr := strings.ToLower(err.Error()) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + + logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": err}).Error("HTTP Error") + http.Error(w, err.Error(), statusCode) +} + +// writeJSON writes the value v to the http response stream as json with standard +// json encoding. +func writeJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return json.NewEncoder(w).Encode(v) +} + +func (s *Server) postAuth(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *cliconfig.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, err := s.daemon.RegistryService.Auth(config) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + }) +} + +func (s *Server) getVersion(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v := &types.Version{ + Version: dockerversion.VERSION, + ApiVersion: api.Version, + GitCommit: dockerversion.GITCOMMIT, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BUILDTIME, + } + + if version.GreaterThanOrEqualTo("1.19") { + v.Experimental = utils.ExperimentalBuild() + } + + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + v.KernelVersion = kernelVersion.String() + } + + return writeJSON(w, http.StatusOK, v) +} + +func (s *Server) postContainersKill(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + var sig uint64 + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + // Check if we passed the signal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sigN, err := strconv.ParseUint(sigStr, 10, 5) + if err != nil { + // The signal is not a number, treat it as a string (either like + // "KILL" or like "SIGKILL") + syscallSig, ok := signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")] + if !ok { + return fmt.Errorf("Invalid signal: %s", sigStr) + } + sig = uint64(syscallSig) + } else { + sig = sigN + } + + if sig == 0 { + return fmt.Errorf("Invalid signal: %s", sigStr) + } + } + + if err := s.daemon.ContainerKill(name, sig); err != nil { + _, isStopped := err.(daemon.ErrContainerNotRunning) + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. 
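postContainersKill accepts either a number or a name: numeric strings are parsed on 5 bits because the largest legal signal is 31, and names are looked up after stripping an optional SIG prefix. A condensed, runnable version of that branch; the two-entry map stands in for signal.SignalMap:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"syscall"
)

var signalMap = map[string]syscall.Signal{"TERM": syscall.SIGTERM, "KILL": syscall.SIGKILL}

func parseSignal(sigStr string) (uint64, error) {
	if sigN, err := strconv.ParseUint(sigStr, 10, 5); err == nil {
		return sigN, nil
	}
	sig, ok := signalMap[strings.TrimPrefix(sigStr, "SIG")]
	if !ok {
		return 0, fmt.Errorf("Invalid signal: %s", sigStr)
	}
	return uint64(sig), nil
}

func main() {
	for _, s := range []string{"9", "KILL", "SIGTERM", "bogus"} {
		n, err := parseSignal(s)
		fmt.Println(s, n, err)
	}
}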
+ if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) postContainersPause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + if err := s.daemon.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) postContainersUnpause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + if err := s.daemon.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) getContainersExport(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + return s.daemon.ContainerExport(vars["name"], w) +} + +func (s *Server) getImagesJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + imagesConfig := graph.ImagesConfig{ + Filters: r.Form.Get("filters"), + // FIXME this parameter could just be a match filter + Filter: r.Form.Get("filter"), + All: boolValue(r, "all"), + } + + images, err := s.daemon.Repositories().Images(&imagesConfig) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, images) +} + +func (s *Server) getInfo(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.daemon.SystemInfo() + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, info) +} + +func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var since int64 = -1 + if r.Form.Get("since") != "" { + s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64) + if err != nil { + return err + } + since = s + } + + var until int64 = -1 + if r.Form.Get("until") != "" { + u, err := strconv.ParseInt(r.Form.Get("until"), 10, 64) + if err != nil { + return err + } + until = u + } + + timer := time.NewTimer(0) + timer.Stop() + if until > 0 { + dur := time.Unix(until, 0).Sub(time.Now()) + timer = time.NewTimer(dur) + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + isFiltered := func(field string, filter []string) bool { + if len(field) == 0 { + return false + } + if len(filter) == 0 { + return false + } + for _, v := range filter { + if v == field { + return false + } + if strings.Contains(field, ":") { + image := strings.Split(field, ":") + if image[0] == v { + return false + } + } + } + return true + } + + d := s.daemon + es := d.EventsService + w.Header().Set("Content-Type", "application/json") + outStream := ioutils.NewWriteFlusher(w) + outStream.Write(nil) // make sure response is sent immediately + enc := json.NewEncoder(outStream) + + getContainerID := func(cn string) string { + c, err := d.Get(cn) + if err != nil { + return "" + } + return c.ID + } + + sendEvent := func(ev *jsonmessage.JSONMessage) error { + //incoming container 
filter can be name,id or partial id, convert and replace as a full container id + for i, cn := range ef["container"] { + ef["container"][i] = getContainerID(cn) + } + + if isFiltered(ev.Status, ef["event"]) || (isFiltered(ev.ID, ef["image"]) && + isFiltered(ev.From, ef["image"])) || isFiltered(ev.ID, ef["container"]) { + return nil + } + + return enc.Encode(ev) + } + + current, l := es.Subscribe() + if since == -1 { + current = nil + } + defer es.Evict(l) + for _, ev := range current { + if ev.Time < since { + continue + } + if err := sendEvent(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := ev.(*jsonmessage.JSONMessage) + if !ok { + continue + } + if err := sendEvent(jev); err != nil { + return err + } + case <-timer.C: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} + +func (s *Server) getImagesHistory(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + history, err := s.daemon.Repositories().History(name) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, history) +} + +func (s *Server) getContainersChanges(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + changes, err := s.daemon.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, changes) +} + +func (s *Server) getContainersTop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if err := parseForm(r); err != nil { + return err + } + + procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, procList) +} + +func (s *Server) getContainersJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + config := &daemon.ContainersConfig{ + All: boolValue(r, "all"), + Size: boolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: r.Form.Get("filters"), + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.daemon.Containers(config) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, containers) +} + +func (s *Server) getContainersStats(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + stream := boolValueOrDefault(r, "stream", true) + var out io.Writer + if !stream { + w.Header().Set("Content-Type", "application/json") + out = w + } else { + out = ioutils.NewWriteFlusher(w) + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + config := &daemon.ContainerStatsConfig{ + Stream: stream, + OutStream: out, + Stop: closeNotifier, + } + + return 
s.daemon.ContainerStats(vars["name"], config) +} + +func (s *Server) getContainersLogs(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + // Validate args here, because we can't return not StatusOK after job.Run() call + stdout, stderr := boolValue(r, "stdout"), boolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + var since time.Time + if r.Form.Get("since") != "" { + s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64) + if err != nil { + return err + } + since = time.Unix(s, 0) + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + c, err := s.daemon.Get(vars["name"]) + if err != nil { + return err + } + + outStream := ioutils.NewWriteFlusher(w) + // write an empty chunk of data (this is to ensure that the + // HTTP Response is sent immediatly, even if the container has + // not yet produced any data) + outStream.Write(nil) + + logsConfig := &daemon.ContainerLogsConfig{ + Follow: boolValue(r, "follow"), + Timestamps: boolValue(r, "timestamps"), + Since: since, + Tail: r.Form.Get("tail"), + UseStdout: stdout, + UseStderr: stderr, + OutStream: outStream, + Stop: closeNotifier, + } + + if err := s.daemon.ContainerLogs(c, logsConfig); err != nil { + fmt.Fprintf(w, "Error running logs job: %s\n", err) + } + + return nil +} + +func (s *Server) postImagesTag(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + repo := r.Form.Get("repo") + tag := r.Form.Get("tag") + force := boolValue(r, "force") + name := vars["name"] + if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil { + return err + } + s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "") + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *Server) postCommit(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + if err := checkForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := boolValue(r, "pause") + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + pause = true + } + + c, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. 
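// Aside (illustrative sketch): getEvents above uses an "optional deadline"
// trick worth isolating. A zero-duration timer that is stopped immediately
// never fires, so the same select loop works whether or not the caller
// supplied an "until" timestamp. A minimal runnable version of just that
// pattern, with a toy event source standing in for the events service:

package main

import (
	"fmt"
	"time"
)

func main() {
	until := time.Now().Add(2 * time.Second).Unix() // 0 would mean "stream forever"

	timer := time.NewTimer(0)
	timer.Stop() // never fires unless re-armed below
	if until > 0 {
		timer = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
	}

	events := make(chan string)
	go func() {
		for _, ev := range []string{"create", "start", "die"} {
			events <- ev
			time.Sleep(500 * time.Millisecond)
		}
	}()

	for {
		select {
		case ev := <-events:
			fmt.Println("event:", ev)
		case <-timer.C:
			fmt.Println("until reached, closing stream")
			return
		}
	}
}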
+ return err + } + + commitCfg := &builder.CommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Changes: r.Form["changes"], + Config: c, + } + + imgID, err := builder.Commit(cname, s.daemon, commitCfg) + if err != nil { + return err + } + + return writeJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: imgID, + }) +} + +// Creates an image from Pull or from Import +func (s *Server) postImagesCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &cliconfig.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &cliconfig.AuthConfig{} + } + } + + var ( + err error + output = ioutils.NewWriteFlusher(w) + ) + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + imagePullConfig := &graph.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + OutStream: output, + } + + err = s.daemon.Repositories().Pull(image, tag, imagePullConfig) + } else { //import + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + + src := r.Form.Get("fromSrc") + imageImportConfig := &graph.ImageImportConfig{ + Changes: r.Form["changes"], + InConfig: r.Body, + OutStream: output, + } + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *runconfig.Config + newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes) + if err != nil { + return err + } + imageImportConfig.ContainerConfig = newConfig + + err = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil + +} + +func (s *Server) getImagesSearch(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + config *cliconfig.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &cliconfig.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.daemon.RegistryService.Search(r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return 
writeJSON(w, http.StatusOK, query.Results) +} + +func (s *Server) postImagesPush(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := parseForm(r); err != nil { + return err + } + authConfig := &cliconfig.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &cliconfig.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + name := vars["name"] + output := ioutils.NewWriteFlusher(w) + imagePushConfig := &graph.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + Tag: r.Form.Get("tag"), + OutStream: output, + } + + w.Header().Set("Content-Type", "application/json") + + if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil + +} + +func (s *Server) getImagesGet(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + imageExportConfig := &graph.ImageExportConfig{Outstream: output} + if name, ok := vars["name"]; ok { + imageExportConfig.Names = []string{name} + } else { + imageExportConfig.Names = r.Form["names"] + } + + if err := s.daemon.Repositories().ImageExport(imageExportConfig); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil + +} + +func (s *Server) postImagesLoad(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.daemon.Repositories().Load(r.Body, w) +} + +func (s *Server) postContainersCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if err := checkForJSON(r); err != nil { + return err + } + var ( + warnings []string + name = r.Form.Get("name") + ) + + config, hostConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + adjustCPUShares(version, hostConfig) + + containerID, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig) + if err != nil { + return err + } + + return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{ + ID: containerID, + Warnings: warnings, + }) +} + +func (s *Server) postContainersRestart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing 
parameter") + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) postContainerRename(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.daemon.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) deleteContainers(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + config := &daemon.ContainerRmConfig{ + ForceRemove: boolValue(r, "force"), + RemoveVolume: boolValue(r, "v"), + RemoveLink: boolValue(r, "link"), + } + + if err := s.daemon.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such id: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) deleteImages(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + name := vars["name"] + force := boolValue(r, "force") + noprune := boolValue(r, "noprune") + + list, err := s.daemon.ImageDelete(name, force, noprune) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, list) +} + +func (s *Server) postContainersStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + var hostConfig *runconfig.HostConfig + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := checkForJSON(r); err != nil { + return err + } + + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { + return err + } + + hostConfig = c + } + + if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil { + if err.Error() == "Container already started" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) postContainersStop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + seconds, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil { + if err.Error() == "Container already stopped" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) 
postContainersWait(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *Server) postContainersResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerResize(vars["name"], height, width) +} + +func (s *Server) postContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + cont, err := s.daemon.Get(vars["name"]) + if err != nil { + return err + } + + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer closeStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ + InStream: inStream, + OutStream: outStream, + UseStdin: boolValue(r, "stdin"), + UseStdout: boolValue(r, "stdout"), + UseStderr: boolValue(r, "stderr"), + Logs: boolValue(r, "logs"), + Stream: boolValue(r, "stream"), + } + + if err := s.daemon.ContainerAttachWithLogs(cont, attachWithLogsConfig); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + + return nil +} + +func (s *Server) wsContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + cont, err := s.daemon.Get(vars["name"]) + if err != nil { + return err + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + + wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ + InStream: ws, + OutStream: ws, + ErrStream: ws, + Logs: boolValue(r, "logs"), + Stream: boolValue(r, "stream"), + } + + if err := s.daemon.ContainerWsAttachWithLogs(cont, wsAttachWithLogsConfig); err != nil { + logrus.Errorf("Error attaching websocket: %s", err) + } + }) + h.ServeHTTP(w, r) + + return nil +} + +func (s *Server) getContainersByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if version.LessThan("1.20") && runtime.GOOS != "windows" { + return getContainersByNameDownlevel(w, s, vars["name"]) + } + + containerJSON, err := s.daemon.ContainerInspect(vars["name"]) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, containerJSON) +} + +func (s *Server) getExecByID(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing 
parameter 'id'") + } + + eConfig, err := s.daemon.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, eConfig) +} + +func (s *Server) getImagesByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + imageInspect, err := s.daemon.Repositories().Lookup(vars["name"]) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, imageInspect) +} + +func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]cliconfig.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + buildConfig = builder.NewBuildConfig() + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + buildConfig.Remove = true + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + buildConfig.Remove = true + } else { + buildConfig.Remove = boolValue(r, "rm") + } + if boolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + buildConfig.Pull = true + } + + output := ioutils.NewWriteFlusher(w) + buildConfig.Stdout = output + buildConfig.Context = r.Body + + buildConfig.RemoteURL = r.FormValue("remote") + buildConfig.DockerfileName = r.FormValue("dockerfile") + buildConfig.RepoName = r.FormValue("t") + buildConfig.SuppressOutput = boolValue(r, "q") + buildConfig.NoCache = boolValue(r, "nocache") + buildConfig.ForceRemove = boolValue(r, "forcerm") + buildConfig.AuthConfigs = authConfigs + buildConfig.MemorySwap = int64ValueOrZero(r, "memswap") + buildConfig.Memory = int64ValueOrZero(r, "memory") + buildConfig.CPUShares = int64ValueOrZero(r, "cpushares") + buildConfig.CPUPeriod = int64ValueOrZero(r, "cpuperiod") + buildConfig.CPUQuota = int64ValueOrZero(r, "cpuquota") + buildConfig.CPUSetCpus = r.FormValue("cpusetcpus") + buildConfig.CPUSetMems = r.FormValue("cpusetmems") + buildConfig.CgroupParent = r.FormValue("cgroupparent") + + var buildUlimits = []*ulimit.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { + return err + } + buildConfig.Ulimits = buildUlimits + } + + // Job cancellation. Note: not all job types support this. + if closeNotifier, ok := w.(http.CloseNotifier); ok { + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-finished: + case <-closeNotifier.CloseNotify(): + logrus.Infof("Client disconnected, cancelling job: build") + buildConfig.Cancel() + } + }() + } + + if err := builder.Build(s.daemon, buildConfig); err != nil { + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an interal error. + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + w.Write(sf.FormatError(err)) + } + return nil +} + +// postContainersCopy is deprecated in favor of getContainersArchivePath. 
+func (s *Server) postContainersCopy(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if err := checkForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such id") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// // Encode the stat to JSON, base64 encode, and place in a header. +func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *Server) headContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := archiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.daemon.ContainerStatPath(v.name, v.path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *Server) getContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := archiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *Server) putContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := archiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir") + return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body) +} + +func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if err := checkForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &runconfig.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.daemon.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %s", name, err) + return err + } + + return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
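// Aside (illustrative sketch): setContainerPathStatHeader above encodes a
// whole struct into one response header: marshal the stat to JSON, base64 it,
// and ship it as X-Docker-Container-Path-Stat, so HEAD responses can carry
// structured data without a body. A standalone round-trip of that scheme;
// pathStat is a trimmed, hypothetical stand-in for types.ContainerPathStat.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type pathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	stat := pathStat{Name: "etc", Size: 4096}
	raw, err := json.Marshal(stat)
	if err != nil {
		panic(err)
	}
	encoded := base64.StdEncoding.EncodeToString(raw)
	fmt.Println("X-Docker-Container-Path-Stat:", encoded)

	// The client reverses the steps to recover the struct.
	decoded, _ := base64.StdEncoding.DecodeString(encoded)
	var back pathStat
	json.Unmarshal(decoded, &back)
	fmt.Printf("decoded: %+v\n", back)
}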
+func (s *Server) postContainerExecStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + execName = vars["name"] + stdin io.ReadCloser + stdout io.Writer + stderr io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if !execStartCheck.Detach { + // Setting up the streaming http interface. + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer closeStreams(inStream, outStream) + + var errStream io.Writer + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + if !execStartCheck.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + stdin = inStream + stdout = outStream + stderr = errStream + } + // Now run the user process in container. + + if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + logrus.Errorf("Error starting exec command in container %s: %s", execName, err) + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *Server) postContainerExecResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerExecResize(vars["name"], height, width) +} + +func (s *Server) optionsHandler(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} +func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") +} + +func (s *Server) ping(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) { + if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + if l, err = sockets.NewTCPSocket(addr, s.cfg.TLSConfig, s.start); err != nil { + return nil, err + } + if err := allocateDaemonPort(addr); err != nil { + return nil, err + } + return +} + +func makeHTTPHandler(logging bool, localMethod string, localRoute string, handlerFunc HTTPAPIFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the request + logrus.Debugf("Calling %s %s", localMethod, localRoute) + + if logging { 
+ logrus.Infof("%s %s", r.Method, r.RequestURI) + } + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + version := version.Version(mux.Vars(r)["version"]) + if version == "" { + version = api.Version + } + if corsHeaders != "" { + writeCorsHeaders(w, r, corsHeaders) + } + + if version.GreaterThan(api.Version) { + http.Error(w, fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", version, api.Version).Error(), http.StatusBadRequest) + return + } + if version.LessThan(api.MinVersion) { + http.Error(w, fmt.Errorf("client is too old, minimum supported API version is %s, please upgrade your client to a newer version", api.MinVersion).Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Server", "Docker/"+dockerversion.VERSION+" ("+runtime.GOOS+")") + + if err := handlerFunc(version, w, r, mux.Vars(r)); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) + httpError(w, err) + } + } +} + +// we keep enableCors just for legacy usage, need to be removed in the future +func createRouter(s *Server) *mux.Router { + r := mux.NewRouter() + if os.Getenv("DEBUG") != "" { + profilerSetup(r, "/debug/") + } + m := map[string]map[string]HTTPAPIFunc{ + "HEAD": { + "/containers/{name:.*}/archive": s.headContainersArchive, + }, + "GET": { + "/_ping": s.ping, + "/events": s.getEvents, + "/info": s.getInfo, + "/version": s.getVersion, + "/images/json": s.getImagesJSON, + "/images/search": s.getImagesSearch, + "/images/get": s.getImagesGet, + "/images/{name:.*}/get": s.getImagesGet, + "/images/{name:.*}/history": s.getImagesHistory, + "/images/{name:.*}/json": s.getImagesByName, + "/containers/ps": s.getContainersJSON, + "/containers/json": s.getContainersJSON, + "/containers/{name:.*}/export": s.getContainersExport, + "/containers/{name:.*}/changes": s.getContainersChanges, + "/containers/{name:.*}/json": s.getContainersByName, + "/containers/{name:.*}/top": s.getContainersTop, + "/containers/{name:.*}/logs": s.getContainersLogs, + "/containers/{name:.*}/stats": s.getContainersStats, + "/containers/{name:.*}/attach/ws": s.wsContainersAttach, + "/exec/{id:.*}/json": s.getExecByID, + "/containers/{name:.*}/archive": s.getContainersArchive, + }, + "POST": { + "/auth": s.postAuth, + "/commit": s.postCommit, + "/build": s.postBuild, + "/images/create": s.postImagesCreate, + "/images/load": s.postImagesLoad, + "/images/{name:.*}/push": s.postImagesPush, + "/images/{name:.*}/tag": s.postImagesTag, + "/containers/create": s.postContainersCreate, + "/containers/{name:.*}/kill": s.postContainersKill, + "/containers/{name:.*}/pause": s.postContainersPause, + "/containers/{name:.*}/unpause": s.postContainersUnpause, + "/containers/{name:.*}/restart": s.postContainersRestart, + "/containers/{name:.*}/start": s.postContainersStart, + "/containers/{name:.*}/stop": s.postContainersStop, + "/containers/{name:.*}/wait": s.postContainersWait, + "/containers/{name:.*}/resize": s.postContainersResize, + 
"/containers/{name:.*}/attach": s.postContainersAttach, + "/containers/{name:.*}/copy": s.postContainersCopy, + "/containers/{name:.*}/exec": s.postContainerExecCreate, + "/exec/{name:.*}/start": s.postContainerExecStart, + "/exec/{name:.*}/resize": s.postContainerExecResize, + "/containers/{name:.*}/rename": s.postContainerRename, + }, + "PUT": { + "/containers/{name:.*}/archive": s.putContainersArchive, + }, + "DELETE": { + "/containers/{name:.*}": s.deleteContainers, + "/images/{name:.*}": s.deleteImages, + }, + "OPTIONS": { + "": s.optionsHandler, + }, + } + + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := s.cfg.CorsHeaders + if corsHeaders == "" && s.cfg.EnableCors { + corsHeaders = "*" + } + + for method, routes := range m { + for route, fct := range routes { + logrus.Debugf("Registering %s, %s", method, route) + // NOTE: scope issue, make sure the variables are local and won't be changed + localRoute := route + localFct := fct + localMethod := method + + // build the handler function + f := makeHTTPHandler(s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version)) + + // add the new route + if localRoute == "" { + r.Methods(localMethod).HandlerFunc(f) + } else { + r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) + r.Path(localRoute).Methods(localMethod).HandlerFunc(f) + } + } + } + + return r +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_experimental.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_experimental.go new file mode 100644 index 00000000..06f55013 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_experimental.go @@ -0,0 +1,17 @@ +// +build experimental + +package server + +func (s *Server) registerSubRouter() { + httpHandler := s.daemon.NetworkApiRouter() + + subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) + subrouter = s.router.PathPrefix("/networks").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) + + subrouter = s.router.PathPrefix("/v{version:[0-9.]+}/services").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) + subrouter = s.router.PathPrefix("/services").Subrouter() + subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux_test.go new file mode 100644 index 00000000..83244b1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux_test.go @@ -0,0 +1,68 @@ +// +build linux + +package server + +import ( + "testing" + + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" +) + +func TestAdjustCPUSharesOldApi(t *testing.T) { + apiVersion := version.Version("1.18") + hostConfig := &runconfig.HostConfig{ + CPUShares: linuxMinCPUShares - 1, + } + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != linuxMinCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != linuxMaxCPUShares { + t.Errorf("Expected CPUShares to be 
%d", linuxMaxCPUShares) + } + + hostConfig.CPUShares = 0 + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +func TestAdjustCPUSharesNoAdjustment(t *testing.T) { + apiVersion := version.Version("1.19") + hostConfig := &runconfig.HostConfig{ + CPUShares: linuxMinCPUShares - 1, + } + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != linuxMinCPUShares-1 { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != linuxMaxCPUShares+1 { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) + } + + hostConfig.CPUShares = 0 + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + adjustCPUShares(apiVersion, hostConfig) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_stub.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_stub.go new file mode 100644 index 00000000..160c2922 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_stub.go @@ -0,0 +1,6 @@ +// +build !experimental + +package server + +func (s *Server) registerSubRouter() { +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go new file mode 100644 index 00000000..84df45bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go @@ -0,0 +1,136 @@ +// +build freebsd linux + +package server + +import ( + "fmt" + "net" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/sockets" + "github.com/docker/docker/pkg/systemd" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/portallocator" +) + +const ( + // See http://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 +) + +// newServer sets up the required serverClosers and does protocol specific checking. +func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { + var ( + err error + ls []net.Listener + ) + switch proto { + case "fd": + ls, err = systemd.ListenFD(addr) + if err != nil { + return nil, err + } + // We don't want to start serving on these sockets until the + // daemon is initialized and installed. Otherwise required handlers + // won't be ready. 
+ <-s.start + case "tcp": + l, err := s.initTCPSocket(addr) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start) + if err != nil { + return nil, err + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + var res []serverCloser + for _, l := range ls { + res = append(res, &HTTPServer{ + &http.Server{ + Addr: addr, + Handler: s.router, + }, + l, + }) + } + return res, nil +} + +// AcceptConnections allows clients to connect to the API server. +// Referenced Daemon is notified about this server, and waits for the +// daemon acknowledgement before the incoming connections are accepted. +func (s *Server) AcceptConnections(d *daemon.Daemon) { + // Tell the init daemon we are accepting requests + s.daemon = d + s.registerSubRouter() + go systemd.SdNotify("READY=1") + // close the lock so the listeners start accepting connections + select { + case <-s.start: + default: + close(s.start) + } +} + +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +func adjustCPUShares(version version.Version, hostConfig *runconfig.HostConfig) { + if version.LessThan("1.19") { + if hostConfig != nil && hostConfig.CPUShares > 0 { + // Handle unsupported CpuShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CpuShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CpuShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + } +} + +// getContainersByNameDownlevel performs processing for pre 1.20 APIs. This +// is only relevant on non-Windows daemons. +func getContainersByNameDownlevel(w http.ResponseWriter, s *Server, namevar string) error { + containerJSONRaw, err := s.daemon.ContainerInspectPre120(namevar) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, containerJSONRaw) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go new file mode 100644 index 00000000..7c80cf0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package server + +import ( + "errors" + "net" + "net/http" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" +) + +// NewServer sets up the required Server and does protocol specific checking. 
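// Aside (illustrative sketch): AcceptConnections above releases the
// listeners by closing the start channel through a select with a default
// case, which makes the signal idempotent; a second call drains the
// already-closed channel instead of panicking on a double close. A minimal
// runnable version of that close-once pattern:

package main

import "fmt"

func release(start chan struct{}) {
	select {
	case <-start: // already closed; receive succeeds immediately, do nothing
	default:
		close(start)
	}
}

func main() {
	start := make(chan struct{})
	release(start)
	release(start) // safe: second call hits the <-start case
	fmt.Println("listeners released")
}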
+func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { + var ( + ls []net.Listener + ) + switch proto { + case "tcp": + l, err := s.initTCPSocket(addr) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, errors.New("Invalid protocol format. Windows only supports tcp.") + } + + var res []serverCloser + for _, l := range ls { + res = append(res, &HTTPServer{ + &http.Server{ + Addr: addr, + Handler: s.router, + }, + l, + }) + } + return res, nil + +} + +// AcceptConnections allows router to start listening for the incoming requests. +func (s *Server) AcceptConnections(d *daemon.Daemon) { + s.daemon = d + s.registerSubRouter() + // close the lock so the listeners start accepting connections + select { + case <-s.start: + default: + close(s.start) + } +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func adjustCPUShares(version version.Version, hostConfig *runconfig.HostConfig) { +} + +// getContainersByNameDownlevel performs processing for pre 1.20 APIs. This +// is only relevant on non-Windows daemons. +func getContainersByNameDownlevel(w http.ResponseWriter, s *Server, namevar string) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go b/Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go new file mode 100644 index 00000000..507830ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,91 @@ +// This package is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// All CPU stats are aggregated since container inception. +type CpuUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage"` + SystemUsage uint64 `json:"system_cpu_usage"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats"` + // number of times memory usage hits limits. 
+ Failcnt uint64 `json:"failcnt"` + Limit uint64 `json:"limit"` +} + +// TODO Windows: This can be factored out +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// TODO Windows: This can be factored out +type BlkioStats struct { + // number of bytes tranferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// TODO Windows: This will require refactoring +type Network struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +type Stats struct { + Read time.Time `json:"read"` + Network Network `json:"network,omitempty"` + PreCpuStats CpuStats `json:"precpu_stats,omitempty"` + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/types/types.go b/Godeps/_workspace/src/github.com/docker/docker/api/types/types.go new file mode 100644 index 00000000..c99e5a9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/types/types.go @@ -0,0 +1,280 @@ +package types + +import ( + "os" + "time" + + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" +) + +// ContainerCreateResponse contains the information returned to a client on the +// creation of a new container. +type ContainerCreateResponse struct { + // ID is the ID of the created container. + ID string `json:"Id"` + + // Warnings are any warnings encountered during the creation of the container. + Warnings []string `json:"Warnings"` +} + +// POST /containers/{name:.*}/exec +type ContainerExecCreateResponse struct { + // ID is the exec ID. 
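// Aside (illustrative sketch): the Stats type above pairs CpuStats with
// PreCpuStats precisely so a client can compute deltas between two samples.
// Below, a hypothetical consumer decodes the JSON stream and derives a CPU
// percentage; the formula (container CPU delta over system delta, scaled by
// core count) is the conventional client-side reading of these fields, not
// something defined in this file. The structs are trimmed stand-ins.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
)

type cpuUsage struct {
	TotalUsage  uint64   `json:"total_usage"`
	PercpuUsage []uint64 `json:"percpu_usage"`
}

type cpuStats struct {
	CpuUsage    cpuUsage `json:"cpu_usage"`
	SystemUsage uint64   `json:"system_cpu_usage"`
}

type stats struct {
	PreCpuStats cpuStats `json:"precpu_stats"`
	CpuStats    cpuStats `json:"cpu_stats"`
}

func cpuPercent(s stats) float64 {
	cpuDelta := float64(s.CpuStats.CpuUsage.TotalUsage) - float64(s.PreCpuStats.CpuUsage.TotalUsage)
	sysDelta := float64(s.CpuStats.SystemUsage) - float64(s.PreCpuStats.SystemUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * float64(len(s.CpuStats.CpuUsage.PercpuUsage)) * 100
}

func main() {
	// e.g. pipe the body of GET /containers/{name}/stats into this program
	dec := json.NewDecoder(os.Stdin)
	for {
		var s stats
		if err := dec.Decode(&s); err == io.EOF {
			return
		} else if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		fmt.Printf("cpu: %.2f%%\n", cpuPercent(s))
	}
}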
+ ID string `json:"Id"` +} + +// POST /auth +type AuthResponse struct { + // Status is the authentication status + Status string `json:"Status"` +} + +// POST "/containers/"+containerID+"/wait" +type ContainerWaitResponse struct { + // StatusCode is the status code of the wait job + StatusCode int `json:"StatusCode"` +} + +// POST "/commit?container="+containerID +type ContainerCommitResponse struct { + ID string `json:"Id"` +} + +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GET "/images/json" +type Image struct { + ID string `json:"Id"` + ParentId string + RepoTags []string + RepoDigests []string + Created int + Size int + VirtualSize int + Labels map[string]string +} + +type GraphDriverData struct { + Name string + Data map[string]string +} + +// GET "/images/{name:.*}/json" +type ImageInspect struct { + Id string + Parent string + Comment string + Created string + Container string + ContainerConfig *runconfig.Config + DockerVersion string + Author string + Config *runconfig.Config + Architecture string + Os string + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData +} + +// GET "/containers/json" +type Port struct { + IP string `json:",omitempty"` + PrivatePort int + PublicPort int `json:",omitempty"` + Type string +} + +type Container struct { + ID string `json:"Id"` + Names []string + Image string + Command string + Created int + Ports []Port + SizeRw int `json:",omitempty"` + SizeRootFs int `json:",omitempty"` + Labels map[string]string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } +} + +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET /containers/{name:.*}/archive +// "name" is the file or directory name. +// "path" is the absolute path to the resource in the container. 
+type ContainerPathStat struct { + Name string `json:"name"` + Path string `json:"path"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` +} + +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +type Version struct { + Version string + ApiVersion version.Version + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// GET "/info" +type Info struct { + ID string + Containers int + Images int + Driver string + DriverStatus [][2]string + MemoryLimit bool + SwapLimit bool + CpuCfsPeriod bool + CpuCfsQuota bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIp6tables bool + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + IndexServerAddress string + RegistryConfig interface{} + InitSha1 string + InitPath string + NCPU int + MemTotal int64 + DockerRootDir string + HttpProxy string + HttpsProxy string + NoProxy string + Name string + Labels []string + ExperimentalBuild bool +} + +// This struct is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +type ContainerState struct { + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string +} + +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + Id string + Created string + Path string + Args []string + State *ContainerState + Image string + NetworkSettings *network.Settings + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + ExecDriver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *runconfig.HostConfig + GraphDriver GraphDriverData +} + +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *runconfig.Config +} + +// backcompatibility struct along with ContainerConfig. Note this is not +// used by the Windows daemon. +type ContainerJSONPre120 struct { + *ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig +} + +type ContainerConfig struct { + *runconfig.Config + + // backward compatibility, they now live in HostConfig + Memory int64 + MemorySwap int64 + CpuShares int64 + Cpuset string +} + +// MountPoint represents a mount point configuration inside the container. 
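// Aside (illustrative sketch): ContainerJSON and ContainerJSONPre120 above
// show the back-compat strategy in these types: shared fields live in
// ContainerJSONBase, and each API generation embeds it, adding only its own
// fields, so the same inspect data can be rendered for old and new clients.
// A toy version of how that embedding plays out on the wire; all type names
// here are invented for the illustration.

package main

import (
	"encoding/json"
	"os"
)

type containerBase struct { // stand-in for ContainerJSONBase
	Id   string
	Name string
}

type containerModern struct { // stand-in for ContainerJSON (API >= 1.20)
	*containerBase
	Mounts []string
}

type containerPre120 struct { // stand-in for ContainerJSONPre120
	*containerBase
	Volumes map[string]string
}

func main() {
	b := &containerBase{Id: "abc123", Name: "/web"}
	enc := json.NewEncoder(os.Stdout)
	// Same base data, two wire formats depending on the client API version.
	enc.Encode(containerModern{containerBase: b, Mounts: []string{"/data"}})
	enc.Encode(containerPre120{containerBase: b, Volumes: map[string]string{"/data": "/var/lib/docker/..."}})
}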
+type MountPoint struct {
+	Name        string `json:",omitempty"`
+	Source      string
+	Destination string
+	Driver      string `json:",omitempty"`
+	Mode        string
+	RW          bool
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go b/Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go
new file mode 100644
index 00000000..4bb98119
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go
@@ -0,0 +1,11 @@
+// AUTOGENERATED FILE; see ./hack/make/.go-autogen
+package dockerversion
+
+var (
+	GITCOMMIT string = ""
+	VERSION   string = ""
+
+	IAMSTATIC string = "true"
+	INITSHA1  string = ""
+	INITPATH  string = ""
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config.go b/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config.go
new file mode 100644
index 00000000..d00bc716
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config.go
@@ -0,0 +1,227 @@
+package cliconfig
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/docker/docker/pkg/system"
+)
+
+const (
+	// ConfigFileName is the name of config file
+	ConfigFileName = "config.json"
+	oldConfigfile  = ".dockercfg"
+
+	// This constant is only used for really old config files when the
+	// URL wasn't saved as part of the config file and it was just
+	// assumed to be this value.
+	defaultIndexserver = "https://index.docker.io/v1/"
+)
+
+var (
+	configDir = os.Getenv("DOCKER_CONFIG")
+)
+
+func init() {
+	if configDir == "" {
+		configDir = filepath.Join(homedir.Get(), ".docker")
+	}
+}
+
+// ConfigDir returns the directory the configuration file is stored in
+func ConfigDir() string {
+	return configDir
+}
+
+// SetConfigDir sets the directory the configuration file is stored in
+func SetConfigDir(dir string) {
+	configDir = dir
+}
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+	Username      string `json:"username,omitempty"`
+	Password      string `json:"password,omitempty"`
+	Auth          string `json:"auth"`
+	Email         string `json:"email"`
+	ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+// ConfigFile ~/.docker/config.json file info
+type ConfigFile struct {
+	AuthConfigs map[string]AuthConfig `json:"auths"`
+	HTTPHeaders map[string]string     `json:"HttpHeaders,omitempty"`
+	PsFormat    string                `json:"psFormat,omitempty"`
+	filename    string                // Note: not serialized - for internal use only
+}
+
+// NewConfigFile initializes an empty configuration file for the given filename 'fn'
+func NewConfigFile(fn string) *ConfigFile {
+	return &ConfigFile{
+		AuthConfigs: make(map[string]AuthConfig),
+		HTTPHeaders: make(map[string]string),
+		filename:    fn,
+	}
+}
+
+// Load reads the configuration files in the given directory, and sets up
+// the auth config information and return values.
+// FIXME: use the internal golang config parser +func Load(configDir string) (*ConfigFile, error) { + if configDir == "" { + configDir = ConfigDir() + } + + configFile := ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + filename: filepath.Join(configDir, ConfigFileName), + } + + // Try happy path first - latest config file + if _, err := os.Stat(configFile.filename); err == nil { + file, err := os.Open(configFile.filename) + if err != nil { + return &configFile, err + } + defer file.Close() + + if err := json.NewDecoder(file).Decode(&configFile); err != nil { + return &configFile, err + } + + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = DecodeAuth(ac.Auth) + if err != nil { + return &configFile, err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + + return &configFile, nil + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return &configFile, err + } + + // Can't find latest config file so check for the old one + confFile := filepath.Join(homedir.Get(), oldConfigfile) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + + b, err := ioutil.ReadFile(confFile) + if err != nil { + return &configFile, err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return &configFile, fmt.Errorf("The Auth config file is empty") + } + authConfig := AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1]) + if err != nil { + return &configFile, err + } + origEmail := strings.Split(arr[1], " = ") + if len(origEmail) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Email = origEmail[1] + authConfig.ServerAddress = defaultIndexserver + configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth) + if err != nil { + return &configFile, err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return &configFile, nil +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = EncodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + + if err := system.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { + return err + } + + if err := ioutil.WriteFile(configFile.filename, data, 0600); err != nil { + return err + } + + return nil +} + +// Filename returns the name of the configuration file +func (configFile *ConfigFile) Filename() string { + 
return configFile.filename +} + +// EncodeAuth creates a base64 encoded string to containing authorization information +func EncodeAuth(authConfig *AuthConfig) string { + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// DecodeAuth decodes a base64 encoded string and returns username and password +func DecodeAuth(authStr string) (string, string, error) { + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config_test.go b/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config_test.go new file mode 100644 index 00000000..25fb58a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config_test.go @@ -0,0 +1,188 @@ +package cliconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/homedir" +) + +func TestMissingFile(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestSaveFileToDirs(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + + tmpHome += "/.docker" + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestEmptyFile(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + ioutil.WriteFile(fn, []byte(""), 0600) + + _, err := Load(tmpHome) + if err == nil { + t.Fatalf("Was supposed to fail") + } +} + +func TestEmptyJson(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + ioutil.WriteFile(fn, []byte("{}"), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestOldJson(t *testing.T) { + if runtime.GOOS == 
"windows" { + return + } + + tmpHome, _ := ioutil.TempDir("", "config-test") + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestNewJson(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestJsonWithPsFormat(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"psFormat":`) || + !strings.Contains(string(buf), "{{.ID}}") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/network/settings.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 00000000..a2e61eb9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,31 @@ +package network + +import "github.com/docker/docker/pkg/nat" + +// Address represents an IP address 
+type Address struct { + Addr string + PrefixLen int +} + +// Settings stores configuration details about the daemon network config +type Settings struct { + Bridge string + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + HairpinMode bool + IPAddress string + IPPrefixLen int + IPv6Gateway string + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + MacAddress string + NetworkID string + PortMapping map[string]map[string]string // Deprecated + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []Address + SecondaryIPv6Addresses []Address +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags.go b/Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags.go new file mode 100644 index 00000000..cbd0f6bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags.go @@ -0,0 +1,29 @@ +package tags + +import ( + "fmt" + + "github.com/docker/distribution/registry/api/v2" +) + +const DEFAULTTAG = "latest" + +type ErrTagInvalidFormat struct { + name string +} + +func (e ErrTagInvalidFormat) Error() string { + return fmt.Sprintf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed ('.' and '-' are NOT allowed in the initial), minimum 1, maximum 128 in length", e.name) +} + +// ValidateTagName validates the name of a tag +func ValidateTagName(name string) error { + if name == "" { + return fmt.Errorf("tag name can't be empty") + } + + if !v2.TagNameAnchoredRegexp.MatchString(name) { + return ErrTagInvalidFormat{name} + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags_unit_test.go b/Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags_unit_test.go new file mode 100644 index 00000000..5114da10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/graph/tags/tags_unit_test.go @@ -0,0 +1,23 @@ +package tags + +import ( + "testing" +) + +func TestValidTagName(t *testing.T) { + validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err != nil { + t.Errorf("'%s' should've been a valid tag", tag) + } + } +} + +func TestInvalidTagName(t *testing.T) { + validTags := []string{"-9", ".foo", "-test", ".", "-"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err == nil { + t.Errorf("'%s' shouldn't have been a valid tag", tag) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/image.go b/Godeps/_workspace/src/github.com/docker/docker/image/image.go new file mode 100644 index 00000000..a405b538 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/image/image.go @@ -0,0 +1,59 @@ +package image + +import ( + "encoding/json" + "fmt" + "regexp" + "time" + + "github.com/docker/docker/runconfig" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// Image stores the image configuration. 
+type Image struct {
+	// ID a unique 64 character identifier of the image
+	ID string `json:"id"`
+	// Parent id of the image
+	Parent string `json:"parent,omitempty"`
+	// Comment user added comment
+	Comment string `json:"comment,omitempty"`
+	// Created timestamp when image was created
+	Created time.Time `json:"created"`
+	// Container is the id of the container used to commit
+	Container string `json:"container,omitempty"`
+	// ContainerConfig is the configuration of the container that is committed into the image
+	ContainerConfig runconfig.Config `json:"container_config,omitempty"`
+	// DockerVersion specifies version on which image is built
+	DockerVersion string `json:"docker_version,omitempty"`
+	// Author of the image
+	Author string `json:"author,omitempty"`
+	// Config is the configuration of the container received from the client
+	Config *runconfig.Config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is built on and runs on
+	Architecture string `json:"architecture,omitempty"`
+	// OS is the operating system used to build and run the image
+	OS string `json:"os,omitempty"`
+	// Size is the total size of the image including all layers it is composed of
+	Size int64
+}
+
+// NewImgJSON creates an Image configuration from json.
+func NewImgJSON(src []byte) (*Image, error) {
+	ret := &Image{}
+
+	// FIXME: Is there a cleaner way to "purify" the input json?
+	if err := json.Unmarshal(src, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return fmt.Errorf("image ID '%s' is invalid", id)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md b/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md
new file mode 100644
index 00000000..f2c29155
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md
@@ -0,0 +1,573 @@
+# Docker Image Specification v1.0.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime. This
+specification outlines the format of these filesystem changes and corresponding
+parameters and describes how to create and use them for use with a container
+runtime and execution tool.
+
+## Terminology
+
+This specification uses the following terms:
+
+<dl>
+    <dt>
+        Layer
+    </dt>
+    <dd>
+        Images are composed of <i>layers</i>. <i>Image layer</i> is a general
+        term which may be used to refer to one or both of the following:
+
+        <ol>
+            <li>The metadata for the layer, described in the JSON format.</li>
+            <li>The filesystem changes described by a layer.</li>
+        </ol>
+
+        To refer to the former you may use the term <i>Layer JSON</i> or
+        <i>Layer Metadata</i>. To refer to the latter you may use the term
+        <i>Image Filesystem Changeset</i> or <i>Image Diff</i>.
+    </dd>
+    <dt>
+        Image JSON
+    </dt>
+    <dd>
+        Each layer has an associated JSON structure which describes some
+        basic information about the image such as date created, author, and the
+        ID of its parent image as well as execution/runtime configuration like
+        its entry point, default arguments, CPU/memory shares, networking, and
+        volumes.
+    </dd>
+    <dt>
+        Image Filesystem Changeset
+    </dt>
+    <dd>
+        Each layer has an archive of the files which have been added, changed,
+        or deleted relative to its parent layer. Using a layer-based or union
+        filesystem such as AUFS, or by computing the diff from filesystem
+        snapshots, the filesystem changeset can be used to present a series of
+        image layers as if they were one cohesive filesystem.
+    </dd>
+    <dt>
+        Image ID
+    </dt>
+    <dd>
+        Each layer is given an ID upon its creation. It is
+        represented as a hexadecimal encoding of 256 bits, e.g.,
+        <code>a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+        Image IDs should be sufficiently random so as to be globally unique.
+        32 bytes read from <code>/dev/urandom</code> is sufficient for all
+        practical purposes. Alternatively, an image ID may be derived as a
+        cryptographic hash of image contents as the result is considered
+        indistinguishable from random. The choice is left up to implementors.
+    </dd>
+    <dt>
+        Image Parent
+    </dt>
+    <dd>
+        Most layer metadata structs contain a <code>parent</code> field which
+        refers to the Image from which another directly descends. An image
+        contains a separate JSON metadata file and set of changes relative to
+        the filesystem of its parent image. <i>Image Ancestor</i> and
+        <i>Image Descendant</i> are also common terms.
+    </dd>
+    <dt>
+        Image Checksum
+    </dt>
+    <dd>
+        Layer metadata structs contain a cryptographic hash of the contents of
+        the layer's filesystem changeset. Though the set of changes exists as a
+        simple Tar archive, two archives with identical filenames and content
+        will have different SHA digests if the last-access or last-modified
+        times of any entries differ. For this reason, image checksums are
+        generated using the TarSum algorithm which produces a cryptographic
+        hash of file contents and selected headers only. Details of this
+        algorithm are described in the separate TarSum specification.
+    </dd>
+    <dt>
+        Tag
+    </dt>
+    <dd>
+        A tag serves to map a descriptive, user-given name to any single image
+        ID. An image name suffix (the name component after <code>:</code>) is
+        often referred to as a tag as well, though it strictly refers to the
+        full name of an image. Acceptable values for a tag suffix are
+        implementation specific, but they SHOULD be limited to the set of
+        alphanumeric characters <code>[a-zA-Z0-9]</code>, punctuation
+        characters <code>[._-]</code>, and MUST NOT contain a <code>:</code>
+        character.
+    </dd>
+    <dt>
+        Repository
+    </dt>
+    <dd>
+        A collection of tags grouped under a common prefix (the name component
+        before <code>:</code>). For example, in an image tagged with the name
+        <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
+        component of the name. Acceptable values for repository name are
+        implementation specific, but they SHOULD be limited to the set of
+        alphanumeric characters <code>[a-zA-Z0-9]</code>, and punctuation
+        characters <code>[._-]</code>, however it MAY contain additional
+        <code>/</code> and <code>:</code> characters for organizational
+        purposes, with the last <code>:</code> character being interpreted as
+        dividing the repository component of the name from the tag suffix
+        component.
+    </dd>
+</dl>
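
For comparison, the `graph/tags` package vendored earlier in this patch enforces roughly this tag grammar via `ValidateTagName`, delegating to the `docker/distribution` pattern `[\w][\w.-]{0,127}` (slightly looser than the SHOULD above, since `\w` also admits a leading underscore). A stdlib-only sketch, with `validateTagName` as a hypothetical stand-in rather than the vendored function:

```go
package main

import (
	"fmt"
	"regexp"
)

// tagNameRegexp mirrors the anchored docker/distribution pattern that
// graph/tags.ValidateTagName delegates to: one word character, then up to
// 127 word characters, dots, or dashes. Assumed here, not imported.
var tagNameRegexp = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)

func validateTagName(name string) error {
	if name == "" {
		return fmt.Errorf("tag name can't be empty")
	}
	if !tagNameRegexp.MatchString(name) {
		return fmt.Errorf("illegal tag name (%s)", name)
	}
	return nil
}

func main() {
	// "latest" and "3.1.4" pass; a leading '-' or an embedded ':' does not.
	for _, tag := range []string{"latest", "3.1.4", "-broken", "my-app:3.1.4"} {
		fmt.Printf("%-12s -> %v\n", tag, validateTagName(tag))
	}
}
```

Note that `my-app:3.1.4` fails as a tag suffix, matching the MUST NOT rule above: the `:` separates the repository component from the tag.
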
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
+    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
+    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
+    "created": "2014-10-13T21:19:18.674353812Z",
+    "author": "Alyssa P. Hacker <alyspdev@example.com>",
+    "architecture": "amd64",
+    "os": "linux",
+    "Size": 271828,
+    "config": {
+        "User": "alice",
+        "Memory": 2048,
+        "MemorySwap": 4096,
+        "CpuShares": 8,
+        "ExposedPorts": {
+            "8080/tcp": {}
+        },
+        "Env": [
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            "FOO=docker_is_a_really",
+            "BAR=great_tool_you_know"
+        ],
+        "Entrypoint": [
+            "/bin/my-app-binary"
+        ],
+        "Cmd": [
+            "--foreground",
+            "--config",
+            "/etc/my-app.d/default.cfg"
+        ],
+        "Volumes": {
+            "/var/job-result-data": {},
+            "/var/log/my-app-logs": {}
+        },
+        "WorkingDir": "/home/alice"
+    }
+}
+```
+
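
Note the mixed key casing above: lower-case metadata keys (`id`, `os`) next to Go-style keys (`Size` and everything under `config`). A minimal decoding sketch using only the standard library; `imageJSON` is a trimmed, hypothetical mirror of the vendored `image.Image` type, not the real struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// imageJSON is a trimmed mirror of the metadata fields used in the example.
type imageJSON struct {
	ID           string `json:"id"`
	Parent       string `json:"parent,omitempty"`
	Created      string `json:"created"`
	Architecture string `json:"architecture,omitempty"`
	OS           string `json:"os,omitempty"`
	Size         int64  // no tag: matched case-insensitively against "Size"
}

func main() {
	src := []byte(`{
		"id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
		"created": "2014-10-13T21:19:18.674353812Z",
		"architecture": "amd64",
		"os": "linux",
		"Size": 271828
	}`)

	var img imageJSON
	if err := json.Unmarshal(src, &img); err != nil {
		panic(err)
	}
	// Prints: linux/amd64 image a9561eb1b190, 271828 bytes
	fmt.Printf("%s/%s image %s, %d bytes\n", img.OS, img.Architecture, img.ID[:12], img.Size)
}
```
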
+### Image JSON Field Descriptions
+
+<dl>
+    <dt>
+        id <code>string</code>
+    </dt>
+    <dd>
+        Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies
+        the image.
+    </dd>
+    <dt>
+        parent <code>string</code>
+    </dt>
+    <dd>
+        ID of the parent image. If there is no parent image then this field
+        should be omitted. A collection of images may share many of the same
+        ancestor layers. This organizational structure is strictly a tree with
+        any one layer having either no parent or a single parent and zero or
+        more descendant layers. Cycles are not allowed and implementations
+        should be careful to avoid creating them or iterating through a cycle
+        indefinitely.
+    </dd>
+    <dt>
+        created <code>string</code>
+    </dt>
+    <dd>
+        ISO-8601 formatted combined date and time at which the image was
+        created.
+    </dd>
+    <dt>
+        author <code>string</code>
+    </dt>
+    <dd>
+        Gives the name and/or email address of the person or entity which
+        created and is responsible for maintaining the image.
+    </dd>
+    <dt>
+        architecture <code>string</code>
+    </dt>
+    <dd>
+        The CPU architecture which the binaries in this image are built to run
+        on. Possible values include:
+        <ul>
+            <li>386</li>
+            <li>amd64</li>
+            <li>arm</li>
+        </ul>
+        More values may be supported in the future and any of these may or may
+        not be supported by a given container runtime implementation.
+    </dd>
+    <dt>
+        os <code>string</code>
+    </dt>
+    <dd>
+        The name of the operating system which the image is built to run on.
+        Possible values include:
+        <ul>
+            <li>darwin</li>
+            <li>freebsd</li>
+            <li>linux</li>
+        </ul>
+        More values may be supported in the future and any of these may or may
+        not be supported by a given container runtime implementation.
+    </dd>
+    <dt>
+        checksum <code>string</code>
+    </dt>
+    <dd>
+        Image Checksum of the filesystem changeset associated with the image
+        layer.
+    </dd>
+    <dt>
+        Size <code>integer</code>
+    </dt>
+    <dd>
+        The size in bytes of the filesystem changeset associated with the image
+        layer.
+    </dd>
+    <dt>
+        config <code>struct</code>
+    </dt>
+    <dd>
+        The execution parameters which should be used as a base when running a
+        container using the image. This field can be <code>null</code>, in
+        which case any execution parameters should be specified at creation of
+        the container.
+
+        <h4>Container RunConfig Field Descriptions</h4>
+
+        <dl>
+            <dt>
+                User <code>string</code>
+            </dt>
+            <dd>
+                The username or UID which the process in the container should
+                run as. This acts as a default value to use when the value is
+                not specified when creating a container.
+
+                All of the following are valid:
+
+                <ul>
+                    <li><code>user</code></li>
+                    <li><code>uid</code></li>
+                    <li><code>user:group</code></li>
+                    <li><code>uid:gid</code></li>
+                    <li><code>uid:group</code></li>
+                    <li><code>user:gid</code></li>
+                </ul>
+
+                If <code>group</code>/<code>gid</code> is not specified, the
+                default group and supplementary groups of the given
+                <code>user</code>/<code>uid</code> in <code>/etc/passwd</code>
+                from the container are applied.
+            </dd>
+            <dt>
+                Memory <code>integer</code>
+            </dt>
+            <dd>
+                Memory limit (in bytes). This acts as a default value to use
+                when the value is not specified when creating a container.
+            </dd>
+            <dt>
+                MemorySwap <code>integer</code>
+            </dt>
+            <dd>
+                Total memory usage (memory + swap); set to <code>-1</code> to
+                disable swap. This acts as a default value to use when the
+                value is not specified when creating a container.
+            </dd>
+            <dt>
+                CpuShares <code>integer</code>
+            </dt>
+            <dd>
+                CPU shares (relative weight vs. other containers). This acts as
+                a default value to use when the value is not specified when
+                creating a container.
+            </dd>
+            <dt>
+                ExposedPorts <code>struct</code>
+            </dt>
+            <dd>
+                A set of ports to expose from a container running this image.
+                This JSON structure value is unusual because it is a direct
+                JSON serialization of the Go type
+                <code>map[string]struct{}</code> and is represented in JSON as
+                an object mapping its keys to an empty object. (A Go sketch of
+                this serialization follows this field list.) Here is an
+                example:
+
+                <pre>{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}</pre>
+
+                Its keys can be in the format of:
+                <ul>
+                    <li><code>"port/tcp"</code></li>
+                    <li><code>"port/udp"</code></li>
+                    <li><code>"port"</code></li>
+                </ul>
+                with the default protocol being <code>"tcp"</code> if not
+                specified. These values act as defaults and are merged with any
+                specified when creating a container.
+            </dd>
+            <dt>
+                Env <code>array of strings</code>
+            </dt>
+            <dd>
+                Entries are in the format of <code>VARNAME="var value"</code>.
+                These values act as defaults and are merged with any specified
+                when creating a container.
+            </dd>
+            <dt>
+                Entrypoint <code>array of strings</code>
+            </dt>
+            <dd>
+                A list of arguments to use as the command to execute when the
+                container starts. This value acts as a default and is replaced
+                by an entrypoint specified when creating a container.
+            </dd>
+            <dt>
+                Cmd <code>array of strings</code>
+            </dt>
+            <dd>
+                Default arguments to the entry point of the container. These
+                values act as defaults and are replaced with any specified when
+                creating a container. If an <code>Entrypoint</code> value is
+                not specified, then the first entry of the <code>Cmd</code>
+                array should be interpreted as the executable to run.
+            </dd>
+            <dt>
+                Volumes <code>struct</code>
+            </dt>
+            <dd>
+                A set of directories which should be created as data volumes in
+                a container running this image. This JSON structure value is
+                unusual because it is a direct JSON serialization of the Go
+                type <code>map[string]struct{}</code> and is represented in
+                JSON as an object mapping its keys to an empty object. Here is
+                an example:
+
+                <pre>{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {}
+}</pre>
+            </dd>
+            <dt>
+                WorkingDir <code>string</code>
+            </dt>
+            <dd>
+                Sets the current working directory of the entry point process
+                in the container. This value acts as a default and is replaced
+                by a working directory specified when creating a container.
+            </dd>
+        </dl>
+    </dd>
+</dl>
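
Both `ExposedPorts` and `Volumes` use this set-as-object encoding, and it round-trips through Go's standard library directly. A minimal sketch (the `portSet` name is illustrative, not part of the spec or the vendored code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// portSet matches the declared Go type: a set whose members are keys and
// whose values are zero-width placeholders.
type portSet map[string]struct{}

func main() {
	exposed := portSet{
		"8080":     {},
		"53/udp":   {},
		"2356/tcp": {},
	}

	// encoding/json renders each struct{} member as an empty object:
	// {"2356/tcp":{},"53/udp":{},"8080":{}} (map keys are emitted sorted).
	out, err := json.Marshal(exposed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// The same shape decodes straight back into the set.
	var decoded portSet
	if err := json.Unmarshal(out, &decoded); err != nil {
		panic(err)
	}
	_, ok := decoded["53/udp"]
	fmt.Println("53/udp exposed:", ok)
}
```

Using `struct{}` values keeps each set entry zero-sized in memory while still serializing to the object-of-empty-objects shape shown above.
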
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory named with the
+ID of the image being created. Here is the initial empty directory structure
+for the changeset for an image with ID `c3167915dc9d` ([real IDs are much
+longer](#id_desc), but this example uses a truncated one here for brevity.
+Implementations need not name the rootfs directory in this way but it may be
+convenient for keeping record of a large number of image layers.):
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+The TarSum checksum for the archive file is then computed and placed in the
+JSON metadata along with the execution parameters.
+
+To make changes to the filesystem of this container image, create a new
+directory named with a new ID, such as `f60c56784b83`, and initialize it with
+a snapshot of the parent image's root filesystem, so that the directory is
+identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
+can make this very efficient:
+
+```
+f60c56784b83/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This example change is going to add a configuration directory at `/etc/my-app.d`
+which contains a default config file. There's also a change to the
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+    etc/
+        my-app.d/
+            default.cfg
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added:    /etc/my-app.d/default.cfg
+Modified: /bin/my-app-tools
+Deleted:  /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: The added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
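
Consuming these changesets means honoring the `.wh.` convention when layers are applied. A minimal consumer-side sketch, assuming an already-extracted layer directory rather than a raw tar stream; `applyLayer` and the directory names are illustrative, and real implementations must also handle symlinks, ownership, and extraction order:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh."

// applyLayer overlays an extracted layer directory onto rootfs.
// Regular entries are copied over; a whiteout entry such as
// etc/.wh.my-app-config deletes etc/my-app-config from rootfs.
func applyLayer(rootfs, layer string) error {
	return filepath.Walk(layer, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(layer, path)
		if err != nil || rel == "." {
			return err
		}
		base := filepath.Base(rel)
		target := filepath.Join(rootfs, rel)

		if strings.HasPrefix(base, whiteoutPrefix) {
			// Whiteout: remove the shadowed file or directory.
			victim := filepath.Join(rootfs, filepath.Dir(rel), strings.TrimPrefix(base, whiteoutPrefix))
			fmt.Println("whiteout:", victim)
			return os.RemoveAll(victim)
		}
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode())
		}

		// Added or modified file: copy it into place.
		src, err := os.Open(path)
		if err != nil {
			return err
		}
		defer src.Close()
		dst, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
		if err != nil {
			return err
		}
		defer dst.Close()
		_, err = io.Copy(dst, src)
		return err
	})
}

func main() {
	// Apply the f60c56784b83 changeset from the example onto a rootfs.
	if err := applyLayer("rootfs", "f60c56784b83"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
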
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - all image layer JSON files
+ - all tar archives of each layer filesystem changesets
+
+For example, here's what the full archive of `library/busybox` is (displayed in
+`tree` format):
+
+```
+.
+├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+└── repositories
+```
+
+There are one or more directories named with the ID for each layer in a full
+image. Each of these directories contains 3 files:
+
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The JSON metadata for an image layer
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+   layer.
+
+The content of the `VERSION` files is simply the semantic version of the JSON
+metadata schema:
+
+```
+1.0
+```
+
+And the `repositories` file is another JSON file which describes names/tags:
+
+```
+{
+    "busybox":{
+        "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
+    }
+}
+```
+
+Every key in this object is the name of a repository, and maps to a collection
+of tag suffixes. Each tag maps to the ID of the image represented by that tag.
+
+## Loading an Image Filesystem Changeset
+
+Unpacking a bundle of image layer JSON files and their corresponding filesystem
+changesets can be done using a series of steps:
+
+1. Follow the parent IDs of image layers to find the root ancestor (an image
+with no parent ID specified).
+2. For every image layer, in order from root ancestor and descending down,
+extract the contents of that layer's filesystem changeset archive into a
+directory which will be used as the root of a container filesystem.
+
+   - Extract all contents of each archive.
+   - Walk the directory tree once more, removing any files with the prefix
+     `.wh.` and the corresponding file or directory named without this prefix.
+
+
+## Implementations
+
+This specification is an admittedly imperfect description of an
+imperfectly-understood problem. The Docker project is, in turn, an attempt to
+implement this specification. Our goal and our execution toward it will evolve
+over time, but our primary concern in this specification and in our
+implementation is compatibility and interoperability.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go b/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go
index 19ee8955..b854227e 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go
@@ -4,12 +4,18 @@ import (
 	"bufio"
 	"fmt"
 	"os"
+	"regexp"
 	"strings"
 )
 
-/*
-Read in a line delimited file with environment variables enumerated
-*/
+var (
+	// EnvironmentVariableRegexp A regexp to validate correct environment variables
+	// Environment variables set by the user must have a name consisting solely of
+	// alphabetics, numerics, and underscores - the first of which must not be numeric.
+ EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") +) + +// ParseEnvFile Read in a line delimited file with environment variables enumerated func ParseEnvFile(filename string) ([]string, error) { fh, err := os.Open(filename) if err != nil { @@ -23,14 +29,15 @@ func ParseEnvFile(filename string) ([]string, error) { line := scanner.Text() // line is not empty, and not starting with '#' if len(line) > 0 && !strings.HasPrefix(line, "#") { - if strings.Contains(line, "=") { - data := strings.SplitN(line, "=", 2) + data := strings.SplitN(line, "=", 2) - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + + if !EnvironmentVariableRegexp.MatchString(variable) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)} + } + if len(data) > 1 { // pass the value through, no trimming lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) @@ -40,11 +47,12 @@ func ParseEnvFile(filename string) ([]string, error) { } } } - return lines, nil + return lines, scanner.Err() } var whiteSpaces = " \t" +// ErrBadEnvVariable typed error for bad environment variable type ErrBadEnvVariable struct { msg string } diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/envfile_test.go b/Godeps/_workspace/src/github.com/docker/docker/opts/envfile_test.go new file mode 100644 index 00000000..cd0ca8f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/envfile_test.go @@ -0,0 +1,133 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := 
ParseEnvFile(tmpFile)
+	if err == nil {
+		t.Fatalf("Expected an ErrBadEnvVariable, got nothing")
+	}
+	if _, ok := err.(ErrBadEnvVariable); !ok {
+		t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err)
+	}
+	expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable"
+	if err.Error() != expectedMessage {
+		t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
+	}
+}
+
+// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize
+func TestParseEnvFileLineTooLongFile(t *testing.T) {
+	content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
+	content = fmt.Sprint("foo=", content)
+
+	tmpFile := tmpFileWithContent(content, t)
+	defer os.Remove(tmpFile)
+
+	_, err := ParseEnvFile(tmpFile)
+	if err == nil {
+		t.Fatal("ParseEnvFile succeeded; expected failure")
+	}
+}
+
+// ParseEnvFile with a random file, pass through
+func TestParseEnvFileRandomFile(t *testing.T) {
+	content := `first line
+another invalid line`
+	tmpFile := tmpFileWithContent(content, t)
+	defer os.Remove(tmpFile)
+
+	_, err := ParseEnvFile(tmpFile)
+
+	if err == nil {
+		t.Fatalf("Expected an ErrBadEnvVariable, got nothing")
+	}
+	if _, ok := err.(ErrBadEnvVariable); !ok {
+		t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err)
+	}
+	expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable"
+	if err.Error() != expectedMessage {
+		t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 00000000..a29335e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 00000000..55eac2aa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package opts
+
+import "fmt"
+
+var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go b/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go
index d960dcd9..b1f95875 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go
@@ -5,6 +5,7 @@ import (
 	"net"
 )
 
+// IpOpt type that holds an IP
 type IpOpt struct {
 	*net.IP
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/ip_test.go b/Godeps/_workspace/src/github.com/docker/docker/opts/ip_test.go
new file mode 100644
index 00000000..b6b526a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/ip_test.go
@@ -0,0 +1,54 @@
+package opts
+
+import (
+	"net"
+	"testing"
+)
+
+func TestIpOptString(t *testing.T) {
+	addresses := []string{"", "0.0.0.0"}
+	var ip net.IP
+
+	for _, address := range addresses {
+		stringAddress := NewIpOpt(&ip, address).String()
+		if stringAddress != address {
+			t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress)
+		}
+	}
+}
+
+func TestNewIpOptInvalidDefaultVal(t *testing.T) {
+	ip := net.IPv4(127, 0, 0, 1)
+	defaultVal := "Not an ip"
+
+	ipOpt := NewIpOpt(&ip, defaultVal)
+
+	expected := 
"127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIpOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IpOpt{IP: &ip} + + invalidIp := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIp) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go index e40c1a33..115ed578 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go @@ -8,70 +8,38 @@ import ( "regexp" "strings" - flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/volume" ) var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - DefaultHTTPHost = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) + // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 + DefaultHTTPHost = "127.0.0.1" + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp:// // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter // is not supplied. A better longer term solution would be to use a named // pipe as the default on the Windows daemon. - DefaultHTTPPort = 2375 // Default HTTP Port - DefaultUnixSocket = "/var/run/docker.sock" // Docker daemon by default always listens on the default unix socket + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultUnixSocket Path for the unix socket. 
+ // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" ) -func ListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, nil), names, usage) -} - -func MapVar(values map[string]string, names []string, usage string) { - flag.Var(newMapOpt(values, nil), names, usage) -} - -func LogOptsVar(values map[string]string, names []string, usage string) { - flag.Var(newMapOpt(values, nil), names, usage) -} - -func HostListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, ValidateHost), names, usage) -} - -func IPListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, ValidateIPAddress), names, usage) -} - -func DnsSearchListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage) -} - -func IPVar(value *net.IP, names []string, defaultValue, usage string) { - flag.Var(NewIpOpt(value, defaultValue), names, usage) -} - -func LabelListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, ValidateLabel), names, usage) -} - -func UlimitMapVar(values map[string]*ulimit.Ulimit, names []string, usage string) { - flag.Var(NewUlimitOpt(values), names, usage) -} - -// ListOpts type +// ListOpts type that hold a list of values and a validation function. type ListOpts struct { values *[]string validator ValidatorFctType } +// NewListOpts Create a new ListOpts with the specified validator. func NewListOpts(validator ValidatorFctType) ListOpts { var values []string - return *newListOptsRef(&values, validator) + return *NewListOptsRef(&values, validator) } -func newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { return &ListOpts{ values: values, validator: validator, @@ -138,12 +106,14 @@ func (opts *ListOpts) Len() int { return len((*opts.values)) } -//MapOpts type +//MapOpts type that holds a map of values and a validation function. type MapOpts struct { values map[string]string validator ValidatorFctType } +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. func (opts *MapOpts) Set(value string) error { if opts.validator != nil { v, err := opts.validator(value) @@ -165,17 +135,23 @@ func (opts *MapOpts) String() string { return fmt.Sprintf("%v", map[string]string((opts.values))) } -func newMapOpt(values map[string]string, validator ValidatorFctType) *MapOpts { +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } return &MapOpts{ values: values, validator: validator, } } -// Validators +// ValidatorFctType validator that return a validate string and/or an error type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType validator that return a validate list of string and/or an error type ValidatorFctListType func(val string) ([]string, error) +// ValidateAttach Validates that the specified string is a valid attach option. 
func ValidateAttach(val string) (string, error) { s := strings.ToLower(val) for _, str := range []string{"stdin", "stdout", "stderr"} { @@ -183,9 +159,10 @@ func ValidateAttach(val string) (string, error) { return s, nil } } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR.") + return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") } +// ValidateLink Validates that the specified string has a valid link format (containerName:alias). func ValidateLink(val string) (string, error) { if _, _, err := parsers.ParseLink(val); err != nil { return val, err @@ -193,22 +170,53 @@ func ValidateLink(val string) (string, error) { return val, nil } -// ValidatePath will make sure 'val' is in the form: -// [host-dir:]container-path[:rw|ro] - but doesn't validate the mode part +// ValidateDevice Validate a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +func ValidateDevice(val string) (string, error) { + return validatePath(val, false) +} + +// ValidatePath Validate a path for volumes +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:rw|ro] +// It will also validate the mount mode. func ValidatePath(val string) (string, error) { + return validatePath(val, true) +} + +func validatePath(val string, validateMountMode bool) (string, error) { var containerPath string + var mode string if strings.Count(val, ":") > 2 { return val, fmt.Errorf("bad format for volumes: %s", val) } - splited := strings.SplitN(val, ":", 2) - if len(splited) == 1 { + splited := strings.SplitN(val, ":", 3) + if splited[0] == "" { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + switch len(splited) { + case 1: containerPath = splited[0] - val = path.Clean(splited[0]) - } else { + val = path.Clean(containerPath) + case 2: + if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid { + containerPath = splited[0] + mode = splited[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = splited[1] + val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath)) + } + case 3: containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], path.Clean(splited[1])) + mode = splited[2] + if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid { + return val, fmt.Errorf("bad mount mode specified : %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode) } if !path.IsAbs(containerPath) { @@ -217,17 +225,24 @@ func ValidatePath(val string) (string, error) { return val, nil } +// ValidateEnv Validate an environment variable and returns it +// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid. +// If no value is specified, it returns the current value using os.Getenv. 
func ValidateEnv(val string) (string, error) { arr := strings.Split(val, "=") if len(arr) > 1 { return val, nil } + if !EnvironmentVariableRegexp.MatchString(arr[0]) { + return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)} + } if !doesEnvExist(val) { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } +// ValidateIPAddress Validates an Ip address func ValidateIPAddress(val string) (string, error) { var ip = net.ParseIP(strings.TrimSpace(val)) if ip != nil { @@ -236,6 +251,7 @@ func ValidateIPAddress(val string) (string, error) { return "", fmt.Errorf("%s is not an ip address", val) } +// ValidateMACAddress Validates a MAC address func ValidateMACAddress(val string) (string, error) { _, err := net.ParseMAC(strings.TrimSpace(val)) if err != nil { @@ -244,9 +260,9 @@ func ValidateMACAddress(val string) (string, error) { return val, nil } -// Validates domain for resolvconf search configuration. +// ValidateDNSSearch Validates domain for resolvconf search configuration. // A zero length domain is represented by . -func ValidateDnsSearch(val string) (string, error) { +func ValidateDNSSearch(val string) (string, error) { if val = strings.Trim(val, " "); val == "." { return val, nil } @@ -264,6 +280,8 @@ func validateDomain(val string) (string, error) { return "", fmt.Errorf("%s is not a valid domain", val) } +// ValidateExtraHost Validate that the given string is a valid extrahost and returns it +// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" arr := strings.SplitN(val, ":", 2) @@ -276,13 +294,16 @@ func ValidateExtraHost(val string) (string, error) { return val, nil } +// ValidateLabel Validate that the given string is a valid label, and returns it +// Labels are in the form on key=value func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") != 1 { + if strings.Count(val, "=") < 1 { return "", fmt.Errorf("bad attribute format: %s", val) } return val, nil } +// ValidateHost Validate that the given string is a valid host and returns it func ValidateHost(val string) (string, error) { host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) if err != nil { diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go b/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go index 921009c6..f08df30b 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go @@ -2,7 +2,7 @@ package opts import ( "fmt" - "net" + "os" "strings" "testing" ) @@ -32,7 +32,7 @@ func TestValidateIPAddress(t *testing.T) { func TestMapOpts(t *testing.T) { tmpMap := make(map[string]string) - o := newMapOpt(tmpMap, logOptsValidator) + o := NewMapOpts(tmpMap, logOptsValidator) o.Set("max-size=1") if o.String() != "map[max-size:1]" { t.Errorf("%s != [map[max-size:1]", o.String()) @@ -69,7 +69,7 @@ func TestValidateMACAddress(t *testing.T) { } } -func TestListOpts(t *testing.T) { +func TestListOptsWithoutValidator(t *testing.T) { o := NewListOpts(nil) o.Set("foo") if o.String() != "[foo]" { @@ -79,6 +79,10 @@ func TestListOpts(t *testing.T) { if o.Len() != 2 { t.Errorf("%d != 2", o.Len()) } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } if !o.Get("bar") { t.Error("o.Get(\"bar\") == false") } @@ -86,12 +90,48 @@ func 
TestListOpts(t *testing.T) { t.Error("o.Get(\"baz\") == true") } o.Delete("foo") - if o.String() != "[bar]" { - t.Errorf("%s != [bar]", o.String()) + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) } } -func TestValidateDnsSearch(t *testing.T) { +func TestValidateDNSSearch(t *testing.T) { valid := []string{ `.`, `a`, @@ -136,14 +176,14 @@ func TestValidateDnsSearch(t *testing.T) { } for _, domain := range valid { - if ret, err := ValidateDnsSearch(domain); err != nil || ret == "" { - t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) } } for _, domain := range invalid { - if ret, err := ValidateDnsSearch(domain); err == nil || ret != "" { - t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) } } } @@ -180,14 +220,251 @@ func TestValidateExtraHosts(t *testing.T) { } } -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } - for _, address := range addresses { - stringAddress := NewIpOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateLink(t *testing.T) { + valid := []string{ + "name", + "dcdfbe62ecd0:alias", + "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", + "angry_torvalds:linus", + } + invalid := map[string]string{ + "": "empty string specified for links", + "too:much:of:it": "bad format for links: too:much:of:it", + } + + for _, link := range valid { + if _, err := ValidateLink(link); err != nil { + t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) + } + } + + for link, expectedError := range invalid { + if _, err := ValidateLink(link); err == nil { + t.Fatalf("ValidateLink(`%q`) should have failed validation", link) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateLink(`%q`) error should contain %q", 
link, expectedError) + } + } + } +} + +func TestValidatePath(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + "/path:rw", + "/path:ro", + "/rw:rw", + } + invalid := map[string]string{ + "": "bad format for volumes: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for volumes: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for volumes: :test", + ":/test": "bad format for volumes: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for volumes: :test:", + "::": "bad format for volumes: ::", + ":::": "bad format for volumes: :::", + "/tmp:::": "bad format for volumes: /tmp:::", + ":/tmp::": "bad format for volumes: :/tmp::", + "path:ro": "path is not an absolute path", + "/path:/path:sw": "bad mount mode specified : sw", + "/path:/path:rwz": "bad mount mode specified : rwz", + } + + for _, path := range valid { + if _, err := ValidatePath(path); err != nil { + t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidatePath(path); err == nil { + t.Fatalf("ValidatePath(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for volumes: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for volumes: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for volumes: :test", + ":/test": "bad format for volumes: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for volumes: :test:", + "::": "bad format for volumes: ::", + ":::": "bad format for volumes: :::", + "/tmp:::": "bad format for volumes: /tmp:::", + ":/tmp::": "bad format for volumes: :/tmp::", + "path:ro": "ro is not an absolute path", + } + + for _, path := range valid { + if _, err := ValidateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestValidateEnv(t *testing.T) { + invalids := map[string]string{ + "some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable", + "asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable", + "1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable", + "123": "poorly formatted environment: variable '123' is 
not a valid environment variable", + } + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + } + for value, expectedError := range invalids { + _, err := ValidateEnv(value) + if err == nil { + t.Fatalf("Expected ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err) + } + if err.Error() != expectedError { + t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error()) + } + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func TestValidateHost(t *testing.T) { + invalid := map[string]string{ + "anything": "Invalid bind address format: anything", + "something with spaces": "Invalid bind address format: something with spaces", + "://": "Invalid bind address format: ://", + "unknown://": "Invalid bind address format: unknown://", + "tcp://": "Invalid proto, expected tcp: ", + "tcp://:port": "Invalid bind address format: :port", + "tcp://invalid": "Invalid bind address format: invalid", + "tcp://invalid:port": "Invalid bind address format: invalid:port", + } + valid := map[string]string{ + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://:2375": "tcp://127.0.0.1:2375", // default ip address + "tcp://:2376": "tcp://127.0.0.1:2376", // default ip address + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix:///var/run/docker.sock", // default unix:// value + "unix://path/to/socket": "unix://path/to/socket", + } + + for value, errorMessage := range invalid { + if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage { + t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) + } + } + for value, expected := range valid { + if actual, err := ValidateHost(value); err != nil || actual != expected { + t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) } } } diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit.go b/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit.go index 361eadf2..f8d34365 100644 --- 
a/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit.go +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit.go @@ -7,10 +7,13 @@ import ( ) type UlimitOpt struct { - values map[string]*ulimit.Ulimit + values *map[string]*ulimit.Ulimit } -func NewUlimitOpt(ref map[string]*ulimit.Ulimit) *UlimitOpt { +func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*ulimit.Ulimit{} + } return &UlimitOpt{ref} } @@ -20,14 +23,14 @@ func (o *UlimitOpt) Set(val string) error { return err } - o.values[l.Name] = l + (*o.values)[l.Name] = l return nil } func (o *UlimitOpt) String() string { var out []string - for _, v := range o.values { + for _, v := range *o.values { out = append(out, v.String()) } @@ -36,7 +39,7 @@ func (o *UlimitOpt) String() string { func (o *UlimitOpt) GetList() []*ulimit.Ulimit { var ulimits []*ulimit.Ulimit - for _, v := range o.values { + for _, v := range *o.values { ulimits = append(ulimits, v) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit_test.go b/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit_test.go new file mode 100644 index 00000000..3845d1ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/docker/pkg/ulimit" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*ulimit.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 00000000..7307d969 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
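For context, a minimal sketch of how the Tar/Untar helpers vendored below are typically driven. This snippet is not part of the applied patch; the /tmp paths are illustrative assumptions, and the import path mirrors the package's canonical location:

	package main

	import (
		"log"

		"github.com/docker/docker/pkg/archive"
	)

	func main() {
		// Tar streams the source directory as a gzip-compressed tar archive.
		rd, err := archive.Tar("/tmp/src", archive.Gzip)
		if err != nil {
			log.Fatal(err)
		}
		defer rd.Close()

		// Untar auto-detects the compression from the stream header and
		// unpacks into the destination directory (nil options use defaults).
		if err := archive.Untar(rd, "/tmp/dst", nil); err != nil {
			log.Fatal(err)
		}
	}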
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 00000000..11a707d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,902 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + Archive io.ReadCloser + ArchiveReader io.Reader + Compression int + TarChownOptions struct { + UID, GID int + } + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + ChownOpts *TarChownOptions + Name string + IncludeSourceDir bool + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} +) + +const ( + Uncompressed Compression = iota + Bzip2 + Gzip + Xz +) + +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debugf("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return CmdStream(exec.Command(args[0], args[1:]...), archive) +} + +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func CompressStream(dest io.WriteCloser, 
compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! 
+ } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } else { + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Debugf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + var renamedRelFilePath string // For when tar.Options.Name is set + for _, include := range options.IncludeFiles { + // We can't use filepath.Join(srcPath, include) because this will + // clean away a trailing "." or "/" which may be important. 
+ walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator)) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Debugf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + if !exceptions && f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // TODO Windows: Verify if this needs to be os.Pathseparator + // Rename the base resource + if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { + renamedRelFilePath = relFilePath + } + // Set this to make sure the items underneath also get renamed + if options.Name != "" { + relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", filePath, err) + } + return nil + }) + } + }() + + return pipeReader, nil +} + +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. 
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0777) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + var r io.Reader = tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, nil) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + if err := archiver.Untar(archive, dst, nil); err != nil { + return err + } + return nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; er != nil {
+			err = er
+		}
+	}()
+	return archiver.Untar(r, filepath.Dir(dst), nil)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is done in an operating-system-specific manner,
+// depending on where the daemon is running. If `dst` ends with a trailing
+// slash the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+	return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// CmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+	if input != nil {
+		stdin, err := cmd.StdinPipe()
+		if err != nil {
+			return nil, err
+		}
+		// Write stdin if any
+		go func() {
+			io.Copy(stdin, input)
+			stdin.Close()
+		}()
+	}
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	pipeR, pipeW := io.Pipe()
+	errChan := make(chan []byte)
+	// Collect stderr, we will use it in case of an error
+	go func() {
+		errText, e := ioutil.ReadAll(stderr)
+		if e != nil {
+			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
+		}
+		errChan <- errText
+	}()
+	// Copy stdout to the returned pipe
+	go func() {
+		_, err := io.Copy(pipeW, stdout)
+		if err != nil {
+			pipeW.CloseWithError(err)
+		}
+		errText := <-errChan
+		if err := cmd.Wait(); err != nil {
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
+		} else {
+			pipeW.Close()
+		}
+	}()
+	// Run the command and return the pipe
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+	return pipeR, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go new file mode 100644 index 00000000..b93c76cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go @@ -0,0 +1,1204 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/docker/docker/pkg/system" +) + +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.gz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a gzip file.") + } +} + +func TestDecompressStreamBzip2(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.bz2") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a bzip2 file.") + } +} + +func TestDecompressStreamXz(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + 
archive, err := os.Open("/tmp/archive.xz")
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress an xz file.")
+	}
+}
+
+func TestCompressStreamXzUnsupported(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, Xz)
+	if err == nil {
+		t.Fatalf("Should fail as xz is unsupported for compression format.")
+	}
+}
+
+func TestCompressStreamBzip2Unsupported(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, Bzip2)
+	if err == nil {
+		t.Fatalf("Should fail as bzip2 is unsupported for compression format.")
+	}
+}
+
+func TestCompressStreamInvalid(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, -1)
+	if err == nil {
+		t.Fatalf("Should fail as -1 is an invalid compression format.")
+	}
+}
+
+func TestExtensionInvalid(t *testing.T) {
+	compression := Compression(-1)
+	output := compression.Extension()
+	if output != "" {
+		t.Fatalf("The extension of an invalid compression should be an empty string.")
+	}
+}
+
+func TestExtensionUncompressed(t *testing.T) {
+	compression := Uncompressed
+	output := compression.Extension()
+	if output != "tar" {
+		t.Fatalf("The extension of an uncompressed archive should be 'tar'.")
+	}
+}
+func TestExtensionBzip2(t *testing.T) {
+	compression := Bzip2
+	output := compression.Extension()
+	if output != "tar.bz2" {
+		t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
+	}
+}
+func TestExtensionGzip(t *testing.T) {
+	compression := Gzip
+	output := compression.Extension()
+	if output != "tar.gz" {
+		t.Fatalf("The extension of a gzip archive should be 'tar.gz'")
+	}
+}
+func TestExtensionXz(t *testing.T) {
+	compression := Xz
+	output := compression.Extension()
+	if output != "tar.xz" {
+		t.Fatalf("The extension of an xz archive should be 'tar.xz'")
+	}
+}
+
+func TestCmdStreamLargeStderr(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
+	out, err := CmdStream(cmd, nil)
+	if err != nil {
+		t.Fatalf("Failed to start command: %s", err)
+	}
+	errCh := make(chan error)
+	go func() {
+		_, err := io.Copy(ioutil.Discard, out)
+		errCh <- err
+	}()
+	select {
+	case err := <-errCh:
+		if err != nil {
+			t.Fatalf("Command should not have failed (err=%.100s...)", err)
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
+	}
+}
+
+func TestCmdStreamBad(t *testing.T) {
+	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
+	out, err := CmdStream(badCmd, nil)
+	if err != nil {
+		t.Fatalf("Failed to start command: %s", err)
+	}
+	if output, err := ioutil.ReadAll(out); err == nil {
+		t.Fatalf("Command should have failed")
+	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
+		t.Fatalf("Wrong error value (%s)", err)
+	} else if s := string(output); s != "hello\n" {
+		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+	}
+}
+
+func TestCmdStreamGood(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
+	out, err := CmdStream(cmd, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if output, err := ioutil.ReadAll(out); err != nil {
+		t.Fatalf("Command should not have
failed (err=%s)", err)
+	} else if s := string(output); s != "hello\n" {
+		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+	}
+}
+
+func TestUntarPathWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	invalidDestFolder := path.Join(tempFolder, "invalidDest")
+	// Create a src file
+	srcFile := path.Join(tempFolder, "src")
+	_, err = os.Create(srcFile)
+	if err != nil {
+		t.Fatalf("Fail to create the source file")
+	}
+	err = UntarPath(srcFile, invalidDestFolder)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid destination path should throw an error.")
+	}
+}
+
+func TestUntarPathWithInvalidSrc(t *testing.T) {
+	dest, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatalf("Fail to create the destination folder")
+	}
+	defer os.RemoveAll(dest)
+	err = UntarPath("/invalid/path", dest)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid src path should throw an error.")
+	}
+}
+
+func TestUntarPath(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := path.Join(tmpFolder, "src")
+	tarFile := path.Join(tmpFolder, "src.tar")
+	os.Create(path.Join(tmpFolder, "src"))
+	cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFolder := path.Join(tmpFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatalf("Fail to create the destination folder")
+	}
+	err = UntarPath(tarFile, destFolder)
+	if err != nil {
+		t.Fatalf("UntarPath shouldn't throw an error, %s.", err)
+	}
+	expectedFile := path.Join(destFolder, srcFile)
+	_, err = os.Stat(expectedFile)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+// Do the same test as above but with the destination as a file; it should fail
+func TestUntarPathWithDestinationFile(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := path.Join(tmpFolder, "src")
+	tarFile := path.Join(tmpFolder, "src.tar")
+	os.Create(path.Join(tmpFolder, "src"))
+	cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFile := path.Join(tmpFolder, "dest")
+	_, err = os.Create(destFile)
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	err = UntarPath(tarFile, destFile)
+	if err == nil {
+		t.Fatalf("UntarPath should throw an error if the destination is a file")
+	}
+}
+
+// Do the same test as above but with a destination folder that already exists
+// and where the destination file is a directory
+// It's working, see https://github.com/docker/docker/issues/10040
+func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := path.Join(tmpFolder, "src")
+	tarFile := path.Join(tmpFolder, "src.tar")
+	os.Create(srcFile)
+	cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFolder := path.Join(tmpFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatalf("Fail to create the destination folder")
+	}
+	// Let's create a folder that will have the same path as the extracted file (from tar)
+	destSrcFileAsFolder := path.Join(destFolder, srcFile)
+	err = os.MkdirAll(destSrcFileAsFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = UntarPath(tarFile, destFolder)
+	if err != nil {
+		t.Fatalf("UntarPath should not throw an error if the extracted file already exists and is a folder")
+	}
+}
+
+func TestCopyWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFolder := path.Join(tempFolder, "dest")
+	invalidSrc := path.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(invalidSrc, destFolder)
+	if err == nil {
+		t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+	}
+}
+
+func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	srcFolder := path.Join(tempFolder, "src")
+	inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(srcFolder, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+	}
+}
+
+// Test CopyWithTar with a file as src
+func TestCopyWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	srcFolder := path.Join(folder, "src")
+	src := path.Join(folder, path.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = CopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content
+	if err != nil {
+		t.Fatalf("Destination file should be the same as the source.")
+	}
+}
+
+// Test CopyWithTar with a folder as src
+func TestCopyWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	src := path.Join(folder, path.Join("src", "folder"))
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777)
+	err = CopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content (the file inside)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestCopyFileWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	destFolder := path.Join(tempFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	invalidFile := path.Join(tempFolder, "doesnotexists")
+	err = CopyFileWithTar(invalidFile, destFolder)
+	if err == nil {
+
t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestTarFiles(t *testing.T) { + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, 
fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarUntarWithXattr(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability") + if capability == nil && capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} + +func TestTarWithOptions(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, + {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { + 
t.Fatal(err)
+	}
+
+	var i1, i2 uint64
+	if i1, err = getNlink(path.Join(origin, "1")); err != nil {
+		t.Fatal(err)
+	}
+	// sanity check that we can hardlink
+	if i1 != 2 {
+		t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if i1, err = getInode(path.Join(dest, "1")); err != nil {
+		t.Fatal(err)
+	}
+	if i2, err = getInode(path.Join(dest, "2")); err != nil {
+		t.Fatal(err)
+	}
+
+	if i1 != i2 {
+		t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+	}
+}
+
+func getNlink(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	// We need this conversion on ARM64
+	return uint64(statT.Nlink), nil
+}
+
+func getInode(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	return statT.Ino, nil
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin, false)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
+
+func BenchmarkTarUntarWithLinks(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin, true)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
+
+func TestUntarInvalidFilenames(t *testing.T) {
+	for i, headers := range [][]*tar.Header{
+		{
+ { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 00000000..9e1dfad2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fil in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go new file mode 100644 index 00000000..18f45c48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -0,0 +1,60 @@ +// +build !windows + +package archive + +import ( + "os" + "testing" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 00000000..10db4bd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,50 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "strings" +) + +// canonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go new file mode 100644 index 00000000..72bc71e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -0,0 +1,65 @@ +// +build windows + +package archive + +import ( + "os" + "testing" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 00000000..689d9a21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,383 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type ChangeType int + +const ( + ChangeModify = iota + ChangeAdd + ChangeDelete +) + +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, ".wh.") { + originalFile := file[len(".wh."):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +type FileInfo struct { + parent *FileInfo + name string + stat *system.Stat_t + children map[string]*FileInfo + capability []byte + added bool +} + +func (root *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := root + if path == string(os.PathSeparator) { + return root + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
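+		// (Editorial note: only the root FileInfo has a nil parent, so the
+		// recursion through filepath.Join in the final return always
+		// terminates in this branch.)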
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. 
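+//
+// A minimal usage sketch (editorial illustration, not upstream documentation;
+// both directory paths are hypothetical):
+//
+//	changes, err := ChangesDirs("/var/lib/layer-new", "/var/lib/layer-old")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, change := range changes {
+//		fmt.Println(change.String()) // e.g. "A /added", "C /modified", "D /removed"
+//	}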
+func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var size int64 + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, _ := os.Lstat(file) + if fileInfo != nil && !fileInfo.IsDir() { + size += fileInfo.Size() + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change) (Archive, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 00000000..dee8b7c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,285 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. 
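+//
+// (Editorial note: pruning on matching inodes is only sound when both
+// entries live on the same device, which is why the walk method below
+// compares st_dev before treating equal inode numbers as "unchanged".)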
+type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
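+	// (Editorial note: this is a standard merge of two sorted lists; each
+	// step advances the side with the lexicographically smaller name, and a
+	// name present on both sides is emitted once unless its inode and device
+	// match on both sides, in which case the entry is pruned.)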
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 00000000..da70ed37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
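+		// (Editorial note: this keeps relPath rooted at exactly one
+		// separator, matching what the Unix code path produces above.)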
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go new file mode 100644 index 00000000..9d528e61 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -0,0 +1,127 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + //defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func 
(th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go new file mode 100644 index 00000000..509bdb2e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go @@ -0,0 +1,495 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "sort" + "testing" + "time" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := os.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func 
TestChangesWithNoChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create an directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + src, err := ioutil.TempDir("", 
"docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for 
expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func checkChanges(expectedChanges, changes []Change, t *testing.T) { + sort.Sort(changesByPath(expectedChanges)) + sort.Sort(changesByPath(changes)) + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go 
b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 00000000..d780f163 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package archive + +import ( + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.Uid() != newStat.Uid() || + oldStat.Gid() != newStat.Gid() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 00000000..4809b7a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,20 @@ +package archive + +import ( + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.ModTime() != newStat.ModTime() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.IsDir() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 00000000..93c81e84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,308 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) { + if !HasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." 
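+		// (Editorial note: re-appending the "." preserves source semantics
+		// such as "dir/.", i.e. copy the directory's contents rather than
+		// the directory itself, which filepath cleaning would otherwise
+		// erase.)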
+ } + + if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// AssertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func AssertsDirectory(path string) bool { + return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path) +} + +// HasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func HasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// SpecifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func SpecifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its +// parent directory and its basename in that directory. +func SplitPathDirEntry(localizedPath string) (dir, base string) { + normalizedPath := filepath.ToSlash(localizedPath) + vol := filepath.VolumeName(normalizedPath) + normalizedPath = normalizedPath[len(vol):] + + if normalizedPath == "/" { + // Specifies the root path. + return filepath.FromSlash(vol + normalizedPath), "." + } + + trimmedPath := vol + strings.TrimRight(normalizedPath, "/") + + dir = filepath.FromSlash(path.Dir(trimmedPath)) + base = filepath.FromSlash(path.Base(trimmedPath)) + + return dir, base +} + +// TarResource archives the resource at the given sourcePath into a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourcePath string) (content Archive, err error) { + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) { + // In the case where the source path is a symbolic link AND it ends + // with a path separator, we will want to evaluate the symbolic link. + trimmedPath := sourcePath[:len(sourcePath)-1] + stat, err := os.Lstat(trimmedPath) + if err != nil { + return nil, err + } + + if stat.Mode()&os.ModeSymlink != 0 { + if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil { + return nil, err + } + } + } + + // Separate the source path between it's directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool +} + +// CopyInfoStatPath stats the given path to create a CopyInfo +// struct representing that resource. If mustExist is true, then +// it is an error if there is no file or directory at the given path. 
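+//
+// A minimal usage sketch (an editorial illustration; the path is hypothetical):
+//
+//	// Stat a destination that is allowed to be missing.
+//	info, err := CopyInfoStatPath("/tmp/dst", false)
+//	if err != nil {
+//		return err // a real stat error, not mere absence
+//	}
+//	if info.Exists && info.IsDir {
+//		// content can be extracted directly into this directory
+//	}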
+func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) { + pathInfo := CopyInfo{Path: path} + + fileInfo, err := os.Lstat(path) + + if err == nil { + pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir() + } else if os.IsNotExist(err) && !mustExist { + err = nil + } + + return pathInfo, err +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case AssertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// rebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurance of oldBase with newBase at the beginning of entry names. 
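+//
+// As a sketch (names hypothetical), an archive whose entries start with
+// "file1" can be retargeted so they extract under "renamed.txt":
+//
+//	rebased := rebaseArchiveEntries(srcContent, "file1", "renamed.txt")
+//	// rebased streams the same content with rewritten entry names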
+func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive { + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string) error { + var ( + srcInfo CopyInfo + err error + ) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil { + return err + } + + content, err := TarResource(srcPath) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { + dstInfo, err := CopyInfoStatPath(dstPath, false) + if err != nil { + return err + } + + if !dstInfo.Exists { + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(dstPath) + + dstStat, err := os.Lstat(dstParent) + if err != nil { + return err + } + if !dstStat.IsDir() { + return ErrNotDirectory + } + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go new file mode 100644 index 00000000..dd0b3236 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_test.go @@ -0,0 +1,637 @@ +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) 
+ + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q", srcPath, dstPath) + + return CopyResource(srcPath, dstPath) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + content, err := TarResource(filepath.Join(tmpDirA, "file1")) + if err == nil { + content.Close() + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + content, err := TarResource(joinTrailingSep(tmpDirA, "file1")) + if err == nil { + content.Close() + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo.Path) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. 
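+//
+// Sketched with hypothetical paths, case A boils down to:
+//
+//	// /tmp/b exists; /tmp/b/itWorks.txt does not.
+//	err := CopyResource("/tmp/a/file1", "/tmp/b/itWorks.txt")
+//	// on success, /tmp/b/itWorks.txt holds a copy of file1's contents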
+func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
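+// (joinTrailingSep(tmpDirB, "dir1") yields e.g. "/tmp/B/dir1/"; per
+// AssertsDirectory above, the trailing separator asserts a directory.)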
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 00000000..d310a17a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,210 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertantly. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, ".wh..wh.") { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. 
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + return 0, err + } + } + continue + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, ".wh.") { + originalBase := base[len(".wh."):] + originalPath := filepath.Join(filepath.Dir(path), originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { + return applyLayerHandler(dest, layer, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
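+//
+// Usage sketch (destination path hypothetical); unlike ApplyLayer, the
+// stream must already be uncompressed:
+//
+//	size, err := ApplyUncompressedLayer("/var/lib/docker/mnt/abc", layerTar)
+//	if err != nil {
+//		return err
+//	}
+//	logrus.Debugf("applied %d bytes of layer content", size)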
+func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) { + return applyLayerHandler(dest, layer, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go new file mode 100644 index 00000000..01ed4372 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go @@ -0,0 +1,190 @@ +package archive + +import ( + "archive/tar" + "testing" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 00000000..cedd46a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm 
zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 00000000..3448569b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 00000000..e85aac05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go new file mode 100644 index 00000000..f5cacea8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, ArchiveReader(r)) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. 
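+//
+// For instance, the hardlink cases above drive it roughly like this (a
+// sketch mirroring the headers in diff_test.go):
+//
+//	headers := []*tar.Header{{
+//		Name:     "dotdot",
+//		Typeflag: tar.TypeLink,
+//		Linkname: "../victim/hello",
+//		Mode:     0644,
+//	}}
+//	if err := testBreakout("applylayer", "docker-breakout", headers); err != nil {
+//		// non-nil means the archive escaped dest (or setup failed)
+//	}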
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. 
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 00000000..dfb335c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go new file mode 100644 index 00000000..46ab3669 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if 
actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..3eaf7f89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,196 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" +) + +// exclusion return true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' +} + +// empty return true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. 
+ return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doen't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := filepath.Match(pattern, file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and remove +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. 
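+//
+// Usage sketch (path hypothetical); for a file, missing parent directories
+// are created first:
+//
+//	// ensure a mount-point file exists without truncating an existing one
+//	if err := CreateIfNotExists("/var/lib/docker/mnt/point", false); err != nil {
+//		return err
+//	}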
+func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 00000000..b544ffbf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,402 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have failed to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have failed to copy to an invalid dest path") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should 
have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
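// Editor's note (illustration, not part of the original patch): the folder-exclusion tests
// below rely on OptimizedMatches also comparing each pattern against the file's parent
// directories, e.g.:
//
//	matched, _ := Matches("docs/README.md", []string{"docs"})
//	// matched == true even though filepath.Match("docs", "docs/README.md") is false,
//	// because the pattern is also matched against the parent path "docs".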
+func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have matched anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have matched anything") + } +} + +// An empty string should return true from Empty.
+func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][0]) + } + if dirs[0][1] != "config" { + t.Errorf("expected second element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have created a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, but it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have created a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, but it's not") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go index 61137a8f..8154e83f 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go @@ -4,7 +4,7 @@ import ( "os" "runtime" - "github.com/docker/libcontainer/user" + "github.com/opencontainers/runc/libcontainer/user" ) // Key returns the env var name for the user's home dir based on diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go 
b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go new file mode 100644 index 00000000..f1e5dcd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go @@ -0,0 +1,58 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +// Download requests a given URL and returns the *http.Response +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows) +func ParseServerHeader(hdr string) (*ServerHeader, error) { + re := regexp.MustCompile(`.*\((.+)\).*$`) + r := &ServerHeader{} + if matches := re.FindStringSubmatch(hdr); matches != nil { + r.OS = matches[1] + parts := strings.Split(hdr, "/") + if len(parts) != 2 { + return nil, errors.New("Bad header: '/' missing") + } + r.App = parts[0] + v := strings.Split(parts[1], " ") + if len(v) != 2 { + return nil, errors.New("Bad header: Expected single space") + } + r.Ver = v[0] + return r, nil + } + return nil, errors.New("Bad header: Failed regex match") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go new file mode 100644 index 00000000..5d1aee40 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go @@ -0,0 +1,29 @@ +package httputils + +import ( + "mime" + "net/http" +) + +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not to be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters.
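// Editor's note: a small illustrative call (not part of the original patch):
//
//	ct, args, _ := DetectContentType([]byte("<!DOCTYPE html><html></html>"))
//	// ct == "text/html" and args == map[string]string{"charset": "utf-8"}, since
//	// http.DetectContentType reports "text/html; charset=utf-8" and
//	// mime.ParseMediaType splits the parameter list off the media type.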
+func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 00000000..bebc8608 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + 
r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 00000000..35338600 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,83 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 00000000..801132ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go @@ -0,0 +1,14 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go new file mode 100644 index 00000000..89688632 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 00000000..f231aa9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go @@ -0,0 +1,226 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx += 1 + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + for i, rdr := range r.readers { + offsetTo, err := r.getOffsetToReader(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if rdr == r.readers[len(r.readers)-1] { + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := 
bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go new file mode 100644 index 00000000..de495b56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go @@ -0,0 +1,149 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := 
int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 00000000..ff09baad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,254 @@ +package ioutils + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "io" + "math/big" + "sync" + "time" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// bufReader allows the underlying reader to continue to produce +// output by pre-emptively reading from the wrapped reader. 
+// This is achieved by buffering this data in bufReader's +// expanding buffer. +type bufReader struct { + sync.Mutex + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond + drainBuf []byte + reuseBuf []byte + maxReuse int64 + resetTimeout time.Duration + bufLenResetThreshold int64 + maxReadDataReset int64 +} + +func NewBufReader(r io.Reader) *bufReader { + var timeout int + if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { + timeout = int(randVal.Int64()) + 180 + } else { + timeout = 300 + } + reader := &bufReader{ + buf: &bytes.Buffer{}, + drainBuf: make([]byte, 1024), + reuseBuf: make([]byte, 4096), + maxReuse: 1000, + resetTimeout: time.Second * time.Duration(timeout), + bufLenResetThreshold: 100 * 1024, + maxReadDataReset: 10 * 1024 * 1024, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { + reader := &bufReader{ + buf: buffer, + drainBuf: drainBuffer, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func (r *bufReader) drain() { + var ( + duration time.Duration + lastReset time.Time + now time.Time + reset bool + bufLen int64 + dataSinceReset int64 + maxBufLen int64 + reuseBufLen int64 + reuseCount int64 + ) + reuseBufLen = int64(len(r.reuseBuf)) + lastReset = time.Now() + for { + n, err := r.reader.Read(r.drainBuf) + dataSinceReset += int64(n) + r.Lock() + bufLen = int64(r.buf.Len()) + if bufLen > maxBufLen { + maxBufLen = bufLen + } + + // Avoid unbounded growth of the buffer over time. + // This has been discovered to be the only non-intrusive + // solution to the unbounded growth of the buffer. + // Alternative solutions such as compression, multiple + // buffers, channels and other similar pieces of code + // were reducing throughput, overall Docker performance + // or simply crashed Docker. + // This solution releases the buffer when specific + // conditions are met to avoid the continuous resizing + // of the buffer for long lived containers. + // + // Move data to the front of the buffer if it's + // smaller than what reuseBuf can store + if bufLen > 0 && reuseBufLen >= bufLen { + n, _ := r.buf.Read(r.reuseBuf) + r.buf.Write(r.reuseBuf[0:n]) + // Take action if the buffer has been reused too many + // times and if there's data in the buffer. + // The timeout is also used as means to avoid doing + // these operations more often or less often than + // required. + // The various conditions try to detect heavy activity + // in the buffer which might be indicators of heavy + // growth of the buffer. + } else if reuseCount >= r.maxReuse && bufLen > 0 { + now = time.Now() + duration = now.Sub(lastReset) + timeoutReached := duration >= r.resetTimeout + + // The timeout has been reached and the + // buffered data couldn't be moved to the front + // of the buffer, so the buffer gets reset. + if timeoutReached && bufLen > reuseBufLen { + reset = true + } + // The amount of buffered data is too high now, + // reset the buffer. + if timeoutReached && maxBufLen >= r.bufLenResetThreshold { + reset = true + } + // Reset the buffer if a certain amount of + // data has gone through the buffer since the + // last reset. + if timeoutReached && dataSinceReset >= r.maxReadDataReset { + reset = true + } + // The buffered data is moved to a fresh buffer, + // swap the old buffer with the new one and + // reset all counters. 
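// (Editor's summary, not part of the original patch: a reset therefore only happens once
// the timeout has elapsed and, in addition, either the buffered data could not be
// compacted into reuseBuf, or the buffer grew past bufLenResetThreshold, or more than
// maxReadDataReset bytes have flowed through since the last reset.)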
+ if reset { + newbuf := &bytes.Buffer{} + newbuf.ReadFrom(r.buf) + r.buf = newbuf + lastReset = now + reset = false + dataSinceReset = 0 + maxBufLen = 0 + reuseCount = 0 + } + } + if err != nil { + r.err = err + } else { + r.buf.Write(r.drainBuf[0:n]) + } + reuseCount++ + r.wait.Signal() + r.Unlock() + callSchedulerIfNecessary() + if err != nil { + break + } + } +} + +func (r *bufReader) Read(p []byte) (n int, err error) { + r.Lock() + defer r.Unlock() + for { + n, err = r.buf.Read(p) + if n > 0 { + return n, err + } + if r.err != nil { + return 0, r.err + } + r.wait.Wait() + } +} + +func (r *bufReader) Close() error { + closer, ok := r.reader.(io.ReadCloser) + if !ok { + return nil + } + return closer.Close() +} + +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go new file mode 100644 index 00000000..0a39b6ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -0,0 +1,216 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +// Implements io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fails.") + +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have called the anonymous func and thus failed.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fails.") { + t.Fatalf("readErrWrapper should have returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have called the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 bytes (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 bytes, but read %d", num) + } +} + +func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) { + reader, writer := io.Pipe() + + drainBuffer := make([]byte, 1024) + buffer := bytes.Buffer{} + bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the 
buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +func TestBufReaderCloseWithNonReaderCloser(t *testing.T) { + reader := strings.NewReader("buffer") + bufreader := NewBufReader(reader) + + if err := bufreader.Close(); err != nil { + t.Fatal(err) + } + +} + +// implements io.ReadCloser +type simpleReaderCloser struct{} + +func (r *simpleReaderCloser) Read(p []byte) (n int, err error) { + return 0, nil +} + +func (r *simpleReaderCloser) Close() error { + return nil +} + +func TestBufReaderCloseWithReaderCloser(t *testing.T) { + reader := &simpleReaderCloser{} + bufreader := NewBufReader(reader) + + err := bufreader.Close() + if err != nil { + t.Fatal(err) + } + +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type repeatedReader struct { + readCount int + maxReads int + data []byte +} + +func newRepeatedReader(max int, data []byte) *repeatedReader { + return &repeatedReader{0, max, data} +} + +func (r *repeatedReader) Read(p []byte) (int, error) { + if r.readCount >= r.maxReads { + return 0, io.EOF + } + r.readCount++ + n := copy(p, r.data) + return n, nil +} + +func testWithData(data []byte, reads int) { + reader := newRepeatedReader(reads, data) + bufReader := NewBufReader(reader) + io.Copy(ioutil.Discard, bufReader) +} + +func Benchmark1M10BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(10) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark1M1024BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark10k32KBytesReads(b *testing.B) { + reads := 10000 + readSize := int64(32 * 1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go new file mode 100644 index 00000000..3c88f29e --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go @@ -0,0 +1,6 @@ +// +build !gccgo + +package ioutils + +func callSchedulerIfNecessary() { +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 00000000..c11d02b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go @@ -0,0 +1,13 @@ +// +build gccgo + +package ioutils + +import ( + "runtime" +) + +func callSchedulerIfNecessary() { + // Allow or force the Go scheduler to switch context; without explicitly + // forcing this, the gccgo implementation will hang. + runtime.Gosched() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 00000000..25095474 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,47 @@ +package ioutils + +import ( + "io" + "net/http" + "sync" +) + +type WriteFlusher struct { + sync.Mutex + w io.Writer + flusher http.Flusher + flushed bool +} + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + wf.Lock() + defer wf.Unlock() + n, err = wf.w.Write(b) + wf.flushed = true + wf.flusher.Flush() + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + wf.Lock() + defer wf.Unlock() + wf.flushed = true + wf.flusher.Flush() +} + +func (wf *WriteFlusher) Flushed() bool { + wf.Lock() + defer wf.Unlock() + return wf.flushed +} + +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var flusher http.Flusher + if f, ok := w.(http.Flusher); ok { + flusher = f + } else { + flusher = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: flusher} +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 00000000..43fdc44e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,60 @@ +package ioutils + +import "io" + +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +type NopFlusher struct{} + +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and holds a count of the number +// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go new file mode 100644 index 00000000..564b1cd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have called the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser should always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. 
%d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 00000000..7db1626e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,172 @@ +package jsonmessage + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/units" +) + +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +type JSONProgress struct { + terminalFd uintptr + Current int `json:"current,omitempty"` + Total int `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` +} + +func (p *JSONProgress) String() string { + var ( + width = 200 + pbBox string + numbersBox string + timeLeftBox string + ) + + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + width = int(ws.Width) + } + + if p.Current <= 0 && p.Total <= 0 { + return "" + } + current := units.HumanSize(float64(p.Current)) + if p.Total <= 0 { + return fmt.Sprintf("%8v", current) + } + total := units.HumanSize(float64(p.Total)) + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negetive gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated +} + +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + // [2K = erase entire current line + fmt.Fprintf(out, "%c[2K\r", 27) + endl = "\r" + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + 
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + diff = 0 + ) + for { + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + line = len(ids) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + diff = 0 + } else { + diff = len(ids) - line + } + if jm.ID != "" && isTerminal { + // [{diff}A = move cursor up diff rows + fmt.Fprintf(out, "%c[%dA", 27, diff) + } + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + // [{diff}B = move cursor down diff rows + fmt.Fprintf(out, "%c[%dB", 27, diff) + } + if err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 00000000..2e78fa7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,210 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "testing" + "time" + + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "strings" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1 B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20 B/100 B" + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = "[=========================> ] 50 B/100 B" + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50 B/40 B" + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now().Unix() + messages := map[JSONMessage][]string{ + // Empty + JSONMessage{}: {"\n", "\n"}, + // Status + JSONMessage{ + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + JSONMessage{ + Time: now, + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now, 0).Format(timeutils.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now, 0).Format(timeutils.RFC3339NanoFixed)), + }, + // Stream over status + 
JSONMessage{ + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + JSONMessage{ + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + JSONMessage{ + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, false); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) + } + // With terminal + data = bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, true); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + } + } +} + +// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. +func TestJSONMessageDisplayWithJSONError(t *testing.T) { + data := bytes.NewBuffer([]byte{}) + jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} + + err := jsonMessage.Display(data, true) + if err == nil || err.Error() != "Can't find it" { + t.Fatalf("Expected a JSONError 404, got [%v]", err) + } + + jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} + err = jsonMessage.Display(data, true) + if err == nil || err.Error() != "Authentication is required." { + t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) + } +} + +func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { + var ( + inFd uintptr + ) + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader("This is not a 'valid' JSON []") + inFd, _ = term.GetFdInfo(reader) + + if err := DisplayJSONMessagesStream(reader, data, inFd, false); err == nil && err.Error()[:17] != "invalid character" { + t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) + } +} + +func TestDisplayJSONMessagesStream(t *testing.T) { + var ( + inFd uintptr + ) + + messages := map[string][]string{ + // empty string + "": { + "", + ""}, + // Without progress & ID + "{ \"status\": \"status\" }": { + "status\n", + "status\n", + }, + // Without progress, with ID + "{ \"id\": \"ID\",\"status\": \"status\" }": { + "ID: status\n", + fmt.Sprintf("ID: status\n%c[%dB", 27, 0), + }, + // With progress + "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { + "ID: status ProgressMessage", + fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 0, 27, 0), + }, + // With progressDetail + "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { + "", // progressbar is disabled in non-terminal + fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 0, 27, 27, 0), + }, + } + for jsonMessage, expectedMessages := range messages { + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader(jsonMessage) + inFd, _ = term.GetFdInfo(reader) + + // Without terminal + if err := DisplayJSONMessagesStream(reader, data, inFd, false); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) + } + + // With terminal + data = bytes.NewBuffer([]byte{}) + reader = strings.NewReader(jsonMessage) + if err := 
DisplayJSONMessagesStream(reader, data, inFd, true); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + } + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go index 9626a2f6..fa8b0458 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go @@ -2,83 +2,82 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -/* - Package flag implements command-line flag parsing. +// Package mflag implements command-line flag parsing. +// +// Usage: +// +// Define flags using flag.String(), Bool(), Int(), etc. +// +// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. +// import flag "github.com/docker/docker/pkg/mflag" +// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") +// If you like, you can bind the flag to a variable using the Var() functions. +// var flagvar int +// func init() { +// // -flaghidden will work, but will be hidden from the usage +// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") +// } +// Or you can create custom flags that satisfy the Value interface (with +// pointer receivers) and couple them to flag parsing by +// flag.Var(&flagVal, []string{"name"}, "help message for flagname") +// For such flags, the default value is just the initial value of the variable. +// +// You can also add "deprecated" flags; they are still usable, but are not shown +// in the usage and will display a warning when you try to use them. `#` before +// an option means this option is deprecated; if there is a following option +// without `#` ahead, then that's the replacement; if not, it will just be removed: +// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") +// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or +// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` +// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") +// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` +// so you can only use `-f`. +// +// You can also group one-letter flags; if you declare +// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") +// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") +// you will be able to use -vs or -sv +// +// After all flags are defined, call +// flag.Parse() +// to parse the command line into the defined flags. +// +// Flags may then be used directly. If you're using the flags themselves, +// they are all pointers; if you bind to variables, they're values. +// fmt.Println("ip has value ", *ip) +// fmt.Println("flagvar has value ", flagvar) +// +// After parsing, the arguments after the flag are available as the +// slice flag.Args() or individually as flag.Arg(i). +// The arguments are indexed from 0 through flag.NArg()-1. +// +// Command line flag syntax: +// -flag +// -flag=x +// -flag="x" +// -flag='x' +// -flag x // non-boolean flags only +// One or two minus signs may be used; they are equivalent.
+// The last form is not permitted for boolean flags because the +// meaning of the command +// cmd -x * +// will change if there is a file called 0, false, etc. You must +// use the -flag=false form to turn off a boolean flag. +// +// Flag parsing stops just before the first non-flag argument +// ("-" is a non-flag argument) or after the terminator "--". +// +// Integer flags accept 1234, 0664, 0x1234 and may be negative. +// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. +// Duration flags accept any input valid for time.ParseDuration. +// +// The default set of command-line flags is controlled by +// top-level functions. The FlagSet type allows one to define +// independent sets of flags, such as to implement subcommands +// in a command-line interface. The methods of FlagSet are +// analogous to the top-level functions for the command-line +// flag set. - Usage: - - Define flags using flag.String(), Bool(), Int(), etc. - - This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. - import "flag /github.com/docker/docker/pkg/mflag" - var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") - If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - // -flaghidden will work, but will be hidden from the usage - flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") - } - Or you can create custom flags that satisfy the Value interface (with - pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, []string{"name"}, "help message for flagname") - For such flags, the default value is just the initial value of the variable. - - You can also add "deprecated" flags, they are still usable, but are not shown - in the usage and will display a warning when you try to use them. `#` before - an option means this option is deprecated, if there is an following option - without `#` ahead, then that's the replacement, if not, it will just be removed: - var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") - this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or - this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` - var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") - will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` - so you can only use `-f`. - - You can also group one letter flags, bif you declare - var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") - var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") - you will be able to use the -vs or -sv - - After all flags are defined, call - flag.Parse() - to parse the command line into the defined flags. - - Flags may then be used directly. If you're using the flags themselves, - they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - - After parsing, the arguments after the flag are available as the - slice flag.Args() or individually as flag.Arg(i). - The arguments are indexed from 0 through flag.NArg()-1. - - Command line flag syntax: - -flag - -flag=x - -flag="x" - -flag='x' - -flag x // non-boolean flags only - One or two minus signs may be used; they are equivalent. 
- The last form is not permitted for boolean flags because the - meaning of the command - cmd -x * - will change if there is a file called 0, false, etc. You must - use the -flag=false form to turn off a boolean flag. - - Flag parsing stops just before the first non-flag argument - ("-" is a non-flag argument) or after the terminator "--". - - Integer flags accept 1234, 0664, 0x1234 and may be negative. - Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. - Duration flags accept any input valid for time.ParseDuration. - - The default set of command-line flags is controlled by - top-level functions. The FlagSet type allows one to define - independent sets of flags, such as to implement subcommands - in a command-line interface. The methods of FlagSet are - analogous to the top-level functions for the command-line - flag set. -*/ package mflag import ( @@ -277,6 +276,7 @@ type Getter interface { // ErrorHandling defines how to handle flag parsing errors. type ErrorHandling int +// ErrorHandling strategies available when a flag parsing error occurs const ( ContinueOnError ErrorHandling = iota ExitOnError @@ -358,28 +358,28 @@ func sortFlags(flags map[string]*Flag) []*Flag { } // Name returns the name of the FlagSet. -func (f *FlagSet) Name() string { - return f.name +func (fs *FlagSet) Name() string { + return fs.name } // Out returns the destination for usage and error messages. -func (f *FlagSet) Out() io.Writer { - if f.output == nil { +func (fs *FlagSet) Out() io.Writer { + if fs.output == nil { return os.Stderr } - return f.output + return fs.output } // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output +func (fs *FlagSet) SetOutput(output io.Writer) { + fs.output = output } // VisitAll visits the flags in lexicographical order, calling fn for each. // It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(f.formal) { +func (fs *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(fs.formal) { fn(flag) } } @@ -392,8 +392,8 @@ func VisitAll(fn func(*Flag)) { // Visit visits the flags in lexicographical order, calling fn for each. // It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(f.actual) { +func (fs *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(fs.actual) { fn(flag) } } @@ -405,13 +405,13 @@ func Visit(fn func(*Flag)) { } // Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.formal[name] +func (fs *FlagSet) Lookup(name string) *Flag { + return fs.formal[name] } -// Indicates whether the specified flag was specified at all on the cmd line -func (f *FlagSet) IsSet(name string) bool { - return f.actual[name] != nil +// IsSet indicates whether the specified flag is set in the given FlagSet +func (fs *FlagSet) IsSet(name string) bool { + return fs.actual[name] != nil } // Lookup returns the Flag structure of the named command-line flag, @@ -420,7 +420,7 @@ func Lookup(name string) *Flag { return CommandLine.formal[name] } -// Indicates whether the specified flag was specified at all on the cmd line +// IsSet indicates whether the specified flag was specified at all on the cmd line. 
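As a quick illustration of the usage documented in the new package comment above, here is a minimal sketch of a program built on this vendored mflag package; the flag names, defaults, and help strings are invented for the example:

    package main

    import (
        "fmt"

        flag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        // -f and --flagname both refer to the same flag; the leading
        // "-" in "-flagname" marks the long (double-dash) form.
        ip := flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
        // "#v" keeps -v working but hides it from the usage and warns
        // that it is deprecated in favour of --verbose.
        verbose := flag.Bool([]string{"#v", "-verbose"}, false, "verbose output")

        flag.Parse()

        fmt.Println("flagname:", *ip, "verbose:", *verbose)
        fmt.Println("positional args:", flag.Args())
    }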
func IsSet(name string) bool { return CommandLine.IsSet(name) } @@ -443,15 +443,15 @@ type nArgRequirement struct { // The first parameter can be Exact, Max, or Min to respectively specify the exact, // the maximum, or the minimal number of arguments required. // The actual check is done in FlagSet.CheckArgs(). -func (f *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { - f.nArgRequirements = append(f.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) +func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { + fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) } // CheckArgs uses the requirements set by FlagSet.Require() to validate // the number of arguments. If the requirements are not met, // an error message string is returned. -func (f *FlagSet) CheckArgs() (message string) { - for _, req := range f.nArgRequirements { +func (fs *FlagSet) CheckArgs() (message string) { + for _, req := range fs.nArgRequirements { var arguments string if req.N == 1 { arguments = "1 argument" @@ -460,20 +460,20 @@ func (f *FlagSet) CheckArgs() (message string) { } str := func(kind string) string { - return fmt.Sprintf("%q requires %s%s", f.name, kind, arguments) + return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) } switch req.Type { case Exact: - if f.NArg() != req.N { + if fs.NArg() != req.N { return str("") } case Max: - if f.NArg() > req.N { + if fs.NArg() > req.N { return str("a maximum of ") } case Min: - if f.NArg() < req.N { + if fs.NArg() < req.N { return str("a minimum of ") } } @@ -482,18 +482,18 @@ func (f *FlagSet) CheckArgs() (message string) { } // Set sets the value of the named flag. -func (f *FlagSet) Set(name, value string) error { - flag, ok := f.formal[name] +func (fs *FlagSet) Set(name, value string) error { + flag, ok := fs.formal[name] if !ok { return fmt.Errorf("no such flag -%v", name) } if err := flag.Value.Set(value); err != nil { return err } - if f.actual == nil { - f.actual = make(map[string]*Flag) + if fs.actual == nil { + fs.actual = make(map[string]*Flag) } - f.actual[name] = flag + fs.actual[name] = flag return nil } @@ -504,8 +504,8 @@ func Set(name, value string) error { // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - writer := tabwriter.NewWriter(f.Out(), 20, 1, 3, ' ', 0) +func (fs *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) home := homedir.Get() // Don't substitute when HOME is / @@ -514,11 +514,11 @@ func (f *FlagSet) PrintDefaults() { } // Add a blank line between cmd description and list of options - if f.FlagCount() > 0 { + if fs.FlagCount() > 0 { fmt.Fprintln(writer, "") } - f.VisitAll(func(flag *Flag) { + fs.VisitAll(func(flag *Flag) { format := " -%s=%s" names := []string{} for _, name := range flag.Names { @@ -526,7 +526,7 @@ func (f *FlagSet) PrintDefaults() { names = append(names, name) } } - if len(names) > 0 { + if len(names) > 0 && len(flag.Usage) > 0 { val := flag.DefValue if home != "" && strings.HasPrefix(val, home) { @@ -551,13 +551,13 @@ func PrintDefaults() { } // defaultUsage is the default function to print a usage message. 
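The Require/CheckArgs pair above is how a command declares its expected number of positional arguments. A hypothetical sketch (the FlagSet name "inspect" and the exactly-one-argument requirement are made up for the example):

    package main

    import (
        "fmt"
        "os"

        mflag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        fs := mflag.NewFlagSet("inspect", mflag.ContinueOnError)
        // Demand exactly one positional argument once flags are parsed;
        // Max and Min work the same way for upper and lower bounds.
        fs.Require(mflag.Exact, 1)

        if err := fs.Parse(os.Args[1:]); err != nil {
            os.Exit(2)
        }
        if msg := fs.CheckArgs(); msg != "" {
            // msg reads like: "inspect" requires 1 argument
            fmt.Fprintln(os.Stderr, msg)
            os.Exit(1)
        }
        fmt.Println("inspecting", fs.Arg(0))
    }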
-func defaultUsage(f *FlagSet) { - if f.name == "" { - fmt.Fprintf(f.Out(), "Usage:\n") +func defaultUsage(fs *FlagSet) { + if fs.name == "" { + fmt.Fprintf(fs.Out(), "Usage:\n") } else { - fmt.Fprintf(f.Out(), "Usage of %s:\n", f.name) + fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) } - f.PrintDefaults() + fs.PrintDefaults() } // NOTE: Usage is not just defaultUsage(CommandLine) @@ -578,12 +578,12 @@ var ShortUsage = func() { } // FlagCount returns the number of flags that have been defined. -func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) } +func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } // FlagCountUndeprecated returns the number of undeprecated flags that have been defined. -func (f *FlagSet) FlagCountUndeprecated() int { +func (fs *FlagSet) FlagCountUndeprecated() int { count := 0 - for _, flag := range sortFlags(f.formal) { + for _, flag := range sortFlags(fs.formal) { for _, name := range flag.Names { if name[0] != '#' { count++ @@ -595,18 +595,18 @@ func (f *FlagSet) FlagCountUndeprecated() int { } // NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } +func (fs *FlagSet) NFlag() int { return len(fs.actual) } // NFlag returns the number of command-line flags that have been set. func NFlag() int { return len(CommandLine.actual) } // Arg returns the i'th argument. Arg(0) is the first remaining argument // after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { +func (fs *FlagSet) Arg(i int) string { + if i < 0 || i >= len(fs.args) { return "" } - return f.args[i] + return fs.args[i] } // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument @@ -616,21 +616,21 @@ func Arg(i int) string { } // NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } +func (fs *FlagSet) NArg() int { return len(fs.args) } // NArg is the number of arguments remaining after flags have been processed. func NArg() int { return len(CommandLine.args) } // Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } +func (fs *FlagSet) Args() []string { return fs.args } // Args returns the non-flag command-line arguments. func Args() []string { return CommandLine.args } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { - f.Var(newBoolValue(value, p), names, usage) +func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + fs.Var(newBoolValue(value, p), names, usage) } // BoolVar defines a bool flag with specified name, default value, and usage string. @@ -641,9 +641,9 @@ func BoolVar(p *bool, names []string, value bool, usage string) { // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { +func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { p := new(bool) - f.BoolVar(p, names, value, usage) + fs.BoolVar(p, names, value, usage) return p } @@ -655,8 +655,8 @@ func Bool(names []string, value bool, usage string) *bool { // IntVar defines an int flag with specified name, default value, and usage string. 
// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { - f.Var(newIntValue(value, p), names, usage) +func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { + fs.Var(newIntValue(value, p), names, usage) } // IntVar defines an int flag with specified name, default value, and usage string. @@ -667,9 +667,9 @@ func IntVar(p *int, names []string, value int, usage string) { // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(names []string, value int, usage string) *int { +func (fs *FlagSet) Int(names []string, value int, usage string) *int { p := new(int) - f.IntVar(p, names, value, usage) + fs.IntVar(p, names, value, usage) return p } @@ -681,8 +681,8 @@ func Int(names []string, value int, usage string) *int { // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { - f.Var(newInt64Value(value, p), names, usage) +func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + fs.Var(newInt64Value(value, p), names, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. @@ -693,9 +693,9 @@ func Int64Var(p *int64, names []string, value int64, usage string) { // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { +func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { p := new(int64) - f.Int64Var(p, names, value, usage) + fs.Int64Var(p, names, value, usage) return p } @@ -707,8 +707,8 @@ func Int64(names []string, value int64, usage string) *int64 { // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { - f.Var(newUintValue(value, p), names, usage) +func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + fs.Var(newUintValue(value, p), names, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. @@ -719,9 +719,9 @@ func UintVar(p *uint, names []string, value uint, usage string) { // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { +func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { p := new(uint) - f.UintVar(p, names, value, usage) + fs.UintVar(p, names, value, usage) return p } @@ -733,8 +733,8 @@ func Uint(names []string, value uint, usage string) *uint { // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. 
-func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { - f.Var(newUint64Value(value, p), names, usage) +func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + fs.Var(newUint64Value(value, p), names, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. @@ -745,9 +745,9 @@ func Uint64Var(p *uint64, names []string, value uint64, usage string) { // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { +func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { p := new(uint64) - f.Uint64Var(p, names, value, usage) + fs.Uint64Var(p, names, value, usage) return p } @@ -759,8 +759,8 @@ func Uint64(names []string, value uint64, usage string) *uint64 { // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { - f.Var(newStringValue(value, p), names, usage) +func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { + fs.Var(newStringValue(value, p), names, usage) } // StringVar defines a string flag with specified name, default value, and usage string. @@ -771,9 +771,9 @@ func StringVar(p *string, names []string, value string, usage string) { // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(names []string, value string, usage string) *string { +func (fs *FlagSet) String(names []string, value string, usage string) *string { p := new(string) - f.StringVar(p, names, value, usage) + fs.StringVar(p, names, value, usage) return p } @@ -785,8 +785,8 @@ func String(names []string, value string, usage string) *string { // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { - f.Var(newFloat64Value(value, p), names, usage) +func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + fs.Var(newFloat64Value(value, p), names, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. @@ -797,9 +797,9 @@ func Float64Var(p *float64, names []string, value float64, usage string) { // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { +func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { p := new(float64) - f.Float64Var(p, names, value, usage) + fs.Float64Var(p, names, value, usage) return p } @@ -811,8 +811,8 @@ func Float64(names []string, value float64, usage string) *float64 { // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. 
-func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - f.Var(newDurationValue(value, p), names, usage) +func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + fs.Var(newDurationValue(value, p), names, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. @@ -823,9 +823,9 @@ func DurationVar(p *time.Duration, names []string, value time.Duration, usage st // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { +func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) - f.DurationVar(p, names, value, usage) + fs.DurationVar(p, names, value, usage) return p } @@ -841,26 +841,26 @@ func Duration(names []string, value time.Duration, usage string) *time.Duration // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, names []string, usage string) { +func (fs *FlagSet) Var(value Value, names []string, usage string) { // Remember the default value as a string; it won't change. flag := &Flag{names, usage, value, value.String()} for _, name := range names { name = strings.TrimPrefix(name, "#") - _, alreadythere := f.formal[name] + _, alreadythere := fs.formal[name] if alreadythere { var msg string - if f.name == "" { + if fs.name == "" { msg = fmt.Sprintf("flag redefined: %s", name) } else { - msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) + msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) } - fmt.Fprintln(f.Out(), msg) + fmt.Fprintln(fs.Out(), msg) panic(msg) // Happens only if flags are declared with identical names } - if f.formal == nil { - f.formal = make(map[string]*Flag) + if fs.formal == nil { + fs.formal = make(map[string]*Flag) } - f.formal[name] = flag + fs.formal[name] = flag } } @@ -876,26 +876,26 @@ func Var(value Value, names []string, usage string) { // failf prints to standard error a formatted error and usage message and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { +func (fs *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) - fmt.Fprintln(f.Out(), err) - if os.Args[0] == f.name { - fmt.Fprintf(f.Out(), "See '%s --help'.\n", os.Args[0]) + fmt.Fprintln(fs.Out(), err) + if os.Args[0] == fs.name { + fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) } else { - fmt.Fprintf(f.Out(), "See '%s %s --help'.\n", os.Args[0], f.name) + fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) } return err } // usage calls the Usage method for the flag set, or the usage function if // the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { +func (fs *FlagSet) usage() { + if fs == CommandLine { Usage() - } else if f.Usage == nil { - defaultUsage(f) + } else if fs.Usage == nil { + defaultUsage(fs) } else { - f.Usage() + fs.Usage() } } @@ -934,25 +934,25 @@ func trimQuotes(str string) string { } // parseOne parses one flag. It reports whether a flag was seen. 
-func (f *FlagSet) parseOne() (bool, string, error) { - if len(f.args) == 0 { +func (fs *FlagSet) parseOne() (bool, string, error) { + if len(fs.args) == 0 { return false, "", nil } - s := f.args[0] + s := fs.args[0] if len(s) == 0 || s[0] != '-' || len(s) == 1 { return false, "", nil } if s[1] == '-' && len(s) == 2 { // "--" terminates the flags - f.args = f.args[1:] + fs.args = fs.args[1:] return false, "", nil } name := s[1:] if len(name) == 0 || name[0] == '=' { - return false, "", f.failf("bad flag syntax: %s", s) + return false, "", fs.failf("bad flag syntax: %s", s) } // it's a flag. does it have an argument? - f.args = f.args[1:] + fs.args = fs.args[1:] hasValue := false value := "" if i := strings.Index(name, "="); i != -1 { @@ -961,44 +961,44 @@ func (f *FlagSet) parseOne() (bool, string, error) { name = name[:i] } - m := f.formal + m := fs.formal flag, alreadythere := m[name] // BUG if !alreadythere { if name == "-help" || name == "help" || name == "h" { // special case for nice help message. - f.usage() + fs.usage() return false, "", ErrHelp } if len(name) > 0 && name[0] == '-' { - return false, "", f.failf("flag provided but not defined: -%s", name) + return false, "", fs.failf("flag provided but not defined: -%s", name) } return false, name, ErrRetry } if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg if hasValue { if err := fv.Set(value); err != nil { - return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) + return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) } } else { fv.Set("true") } } else { // It must have a value, which might be the next argument. - if !hasValue && len(f.args) > 0 { + if !hasValue && len(fs.args) > 0 { // value is the next arg hasValue = true - value, f.args = f.args[0], f.args[1:] + value, fs.args = fs.args[0], fs.args[1:] } if !hasValue { - return false, "", f.failf("flag needs an argument: -%s", name) + return false, "", fs.failf("flag needs an argument: -%s", name) } if err := flag.Value.Set(value); err != nil { - return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err) + return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) } } - if f.actual == nil { - f.actual = make(map[string]*Flag) + if fs.actual == nil { + fs.actual = make(map[string]*Flag) } - f.actual[name] = flag + fs.actual[name] = flag for i, n := range flag.Names { if n == fmt.Sprintf("#%s", name) { replacement := "" @@ -1009,9 +1009,9 @@ func (f *FlagSet) parseOne() (bool, string, error) { } } if replacement != "" { - fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) } else { - fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) } } } @@ -1022,11 +1022,11 @@ func (f *FlagSet) parseOne() (bool, string, error) { // include the command name. Must be called after all flags in the FlagSet // are defined and before flags are accessed by the program. // The return value will be ErrHelp if -help was set but not defined. 
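The boolean special case in parseOne above gives mflag the standard Go flag semantics for booleans: a bool flag consumes no argument, so only the -flag=false form can unset it. A small sketch with invented flag names:

    package main

    import (
        "fmt"

        mflag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        fs := mflag.NewFlagSet("demo", mflag.ContinueOnError)
        force := fs.Bool([]string{"f", "-force"}, false, "force the operation")

        // A boolean flag needs no argument: -f alone sets it to true,
        // and anything after it stays a positional argument.
        fs.Parse([]string{"-f", "target"})
        fmt.Println(*force, fs.Args()) // true [target]

        // Only the -f=false form turns a boolean flag off again;
        // "-f false" would leave "false" as a positional argument.
        fs.Parse([]string{"-f=false"})
        fmt.Println(*force) // false
    }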
-func (f *FlagSet) Parse(arguments []string) error { - f.parsed = true - f.args = arguments +func (fs *FlagSet) Parse(arguments []string) error { + fs.parsed = true + fs.args = arguments for { - seen, name, err := f.parseOne() + seen, name, err := fs.parseOne() if seen { continue } @@ -1037,13 +1037,13 @@ func (f *FlagSet) Parse(arguments []string) error { if len(name) > 1 { err = nil for _, letter := range strings.Split(name, "") { - f.args = append([]string{"-" + letter}, f.args...) - seen2, _, err2 := f.parseOne() + fs.args = append([]string{"-" + letter}, fs.args...) + seen2, _, err2 := fs.parseOne() if seen2 { continue } if err2 != nil { - err = f.failf("flag provided but not defined: -%s", name) + err = fs.failf("flag provided but not defined: -%s", name) break } } @@ -1051,10 +1051,10 @@ func (f *FlagSet) Parse(arguments []string) error { continue } } else { - err = f.failf("flag provided but not defined: -%s", name) + err = fs.failf("flag provided but not defined: -%s", name) } } - switch f.errorHandling { + switch fs.errorHandling { case ContinueOnError: return err case ExitOnError: @@ -1067,46 +1067,48 @@ func (f *FlagSet) Parse(arguments []string) error { } // ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls cmd.Parse(args) and prints a relevant error message if there are +// calls fs.Parse(args) and prints a relevant error message if there is an // incorrect number of arguments. It returns error only if error handling is // set to ContinueOnError and parsing fails. If error handling is set to // ExitOnError, it's safe to ignore the return value. -func (cmd *FlagSet) ParseFlags(args []string, withHelp bool) error { +func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { var help *bool if withHelp { - help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage") + help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") } - if err := cmd.Parse(args); err != nil { + if err := fs.Parse(args); err != nil { return err } if help != nil && *help { - cmd.SetOutput(os.Stdout) - cmd.Usage() + fs.SetOutput(os.Stdout) + fs.Usage() os.Exit(0) } - if str := cmd.CheckArgs(); str != "" { - cmd.SetOutput(os.Stderr) - cmd.ReportError(str, withHelp) - cmd.ShortUsage() + if str := fs.CheckArgs(); str != "" { + fs.SetOutput(os.Stderr) + fs.ReportError(str, withHelp) + fs.ShortUsage() os.Exit(1) } return nil } -func (cmd *FlagSet) ReportError(str string, withHelp bool) { +// ReportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help. +func (fs *FlagSet) ReportError(str string, withHelp bool) { if withHelp { - if os.Args[0] == cmd.Name() { + if os.Args[0] == fs.Name() { str += ".\nSee '" + os.Args[0] + " --help'" } else { - str += ".\nSee '" + os.Args[0] + " " + cmd.Name() + " --help'" + str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" } } - fmt.Fprintf(cmd.Out(), "docker: %s.\n", str) + fmt.Fprintf(fs.Out(), "docker: %s.\n", str) } -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed +// Parsed reports whether fs.Parse has been called. +func (fs *FlagSet) Parsed() bool { + return fs.parsed } // Parse parses the command-line flags from os.Args[1:]. Must be called @@ -1139,7 +1141,61 @@ func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { // Init sets the name and error handling property for a flag set.
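ParseFlags above bundles help registration, Parse, and CheckArgs into a single call; a sketch of a typical call site (the set name "ps" and the -a flag are illustrative only):

    package main

    import (
        "fmt"
        "os"

        mflag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        fs := mflag.NewFlagSet("ps", mflag.ExitOnError)
        all := fs.Bool([]string{"a", "-all"}, false, "show all entries")

        // withHelp=true registers a hidden #help/--help flag. With
        // ExitOnError the returned error can safely be ignored, since
        // ParseFlags exits the process on any failure itself.
        fs.ParseFlags(os.Args[1:], true)

        fmt.Println("all:", *all)
    }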
// By default, the zero FlagSet uses an empty name and the // ContinueOnError error handling policy. -func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling +func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { + fs.name = name + fs.errorHandling = errorHandling +} + +type mergeVal struct { + Value + key string + fset *FlagSet +} + +func (v mergeVal) Set(s string) error { + return v.fset.Set(v.key, s) +} + +func (v mergeVal) IsBoolFlag() bool { + if b, ok := v.Value.(boolFlag); ok { + return b.IsBoolFlag() + } + return false +} + +// Merge is a helper function that merges n FlagSets into a single dest FlagSet. +// In case of name collision between the flagsets it will apply +// the destination FlagSet's errorHandling behaviour. +func Merge(dest *FlagSet, flagsets ...*FlagSet) error { + for _, fset := range flagsets { + for k, f := range fset.formal { + if _, ok := dest.formal[k]; ok { + var err error + if fset.name == "" { + err = fmt.Errorf("flag redefined: %s", k) + } else { + err = fmt.Errorf("%s flag redefined: %s", fset.name, k) + } + fmt.Fprintln(fset.Out(), err.Error()) + // Happens only if flags are declared with identical names + switch dest.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + newF := *f + newF.Value = mergeVal{f.Value, k, fset} + dest.formal[k] = &newF + } + } + return nil +} + +// IsEmpty reports whether the FlagSet is actually empty. +func (fs *FlagSet) IsEmpty() bool { + return len(fs.actual) == 0 } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go index 9a20df21..ed7216e5 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go @@ -5,7 +5,7 @@ import ( ) // GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*MountInfo, error) { +func GetMounts() ([]*Info, error) { return parseMountTable() } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go index 8ea08648..e3fc3535 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go @@ -1,10 +1,10 @@ package mount -// MountInfo reveals information about a particular mounted filesystem. This +// Info reveals information about a particular mounted filesystem. This // struct is populated from the content in the /proc/<pid>/mountinfo file. -type MountInfo struct { - // Id is a unique identifier of the mount (may be reused after umount). - Id int +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int // Parent indicates the ID of the mount parent (or of self for the top of the // mount tree). diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index add7c3b0..4f32edcd 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -15,7 +15,7 @@ import ( // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts.
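Merge, added above, folds several FlagSets into one destination set; a flag parsed through the destination is written back, via mergeVal, to the set that originally defined it. A hedged sketch with two invented sets:

    package main

    import (
        "fmt"

        mflag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        common := mflag.NewFlagSet("common", mflag.ContinueOnError)
        debug := common.Bool([]string{"D", "-debug"}, false, "enable debug output")

        cmd := mflag.NewFlagSet("run", mflag.ContinueOnError)
        name := cmd.String([]string{"-name"}, "", "assign a name")

        // Fold the shared flags into the command's own set; values set
        // through cmd are written back to the originating FlagSet.
        if err := mflag.Merge(cmd, common); err != nil {
            panic(err)
        }
        cmd.Parse([]string{"-D", "--name", "web"})
        fmt.Println(*debug, *name) // true web
    }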
-func parseMountTable() ([]*MountInfo, error) { +func parseMountTable() ([]*Info, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) @@ -29,9 +29,9 @@ func parseMountTable() ([]*MountInfo, error) { header.Len = count header.Data = uintptr(unsafe.Pointer(rawEntries)) - var out []*MountInfo + var out []*Info for _, entry := range entries { - var mountinfo MountInfo + var mountinfo Info mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go index 351a58ea..be69fee1 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -30,7 +30,7 @@ const ( // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts -func parseMountTable() ([]*MountInfo, error) { +func parseMountTable() ([]*Info, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err @@ -40,10 +40,10 @@ func parseMountTable() ([]*MountInfo, error) { return parseInfoFile(f) } -func parseInfoFile(r io.Reader) ([]*MountInfo, error) { +func parseInfoFile(r io.Reader) ([]*Info, error) { var ( s = bufio.NewScanner(r) - out = []*MountInfo{} + out = []*Info{} ) for s.Scan() { @@ -52,13 +52,13 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } var ( - p = &MountInfo{} + p = &Info{} text = s.Text() optionalFields string ) if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.Id, &p.Parent, &p.Major, &p.Minor, + &p.ID, &p.Parent, &p.Major, &p.Minor, &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } @@ -84,7 +84,7 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { // PidMountInfo collects the mounts for a specific process ID. If the process // ID is unknown, it is better to use `GetMounts` which will inspect // "/proc/self/mountinfo" instead. 
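Since the MountInfo type is now simply mount.Info, consumer code reads roughly as in this sketch (Linux-oriented, as parseMountTable is backed by /proc/self/mountinfo there; the output format is arbitrary):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        // GetMounts now returns []*mount.Info (renamed from MountInfo).
        infos, err := mount.GetMounts()
        if err != nil {
            panic(err)
        }
        for _, info := range infos {
            // ID was renamed from Id as part of the same cleanup.
            fmt.Printf("%d: %s on %s type %s\n",
                info.ID, info.Source, info.Mountpoint, info.Fstype)
        }
    }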
-func PidMountInfo(pid int) ([]*MountInfo, error) { +func PidMountInfo(pid int) ([]*Info, error) { f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { return nil, err diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go index e92b7e2c..812d12e8 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go @@ -457,8 +457,8 @@ func TestParseFedoraMountinfoFields(t *testing.T) { if len(infos) != expectedLength { t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) } - mi := MountInfo{ - Id: 15, + mi := Info{ + ID: 15, Parent: 35, Major: 0, Minor: 3, diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go index 352336b9..8245f01d 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -7,6 +7,6 @@ import ( "runtime" ) -func parseMountTable() ([]*MountInfo, error) { +func parseMountTable() ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go index da9aa150..4a8d22f0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go @@ -107,7 +107,7 @@ func TestSubtreePrivate(t *testing.T) { } // Testing that when a target is a shared mount, -// then child mounts propogate to the source +// then child mounts propagate to the source func TestSubtreeShared(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { diff --git a/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/nat.go similarity index 64% rename from Godeps/_workspace/src/github.com/docker/docker/nat/nat.go rename to Godeps/_workspace/src/github.com/docker/docker/pkg/nat/nat.go index 2cec2e86..1fbb13e6 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/nat.go @@ -13,26 +13,41 @@ import ( ) const ( - PortSpecTemplate = "ip:hostPort:containerPort" - PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort" + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" ) +// PortBinding represents a binding between a Host IP address and a Host Port type PortBinding struct { - HostIp string + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number HostPort string } +// PortMap is a collection of PortBinding indexed by Port type PortMap map[Port][]PortBinding +// PortSet is a collection of structs indexed by Port type PortSet map[Port]struct{} -// 80/tcp +// Port is a string containing port number and protocol in the format "80/tcp" type Port string -func NewPort(proto, port string) Port { - 
return Port(fmt.Sprintf("%s/%s", port, proto)) +// NewPort creates a new instance of a Port given a protocol and port number +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portInt, err := ParsePort(port) + if err != nil { + return "", err + } + + return Port(fmt.Sprintf("%d/%s", portInt, proto)), nil } +// ParsePort parses the port number string and returns an int func ParsePort(rawPort string) (int, error) { if len(rawPort) == 0 { return 0, nil @@ -44,25 +59,32 @@ func ParsePort(rawPort string) (int, error) { return int(port), nil } +// Proto returns the protocol of a Port func (p Port) Proto() string { proto, _ := SplitProtoPort(string(p)) return proto } +// Port returns the port number of a Port func (p Port) Port() string { _, port := SplitProtoPort(string(p)) return port } +// Int returns the port number of a Port as an int func (p Port) Int() int { - port, err := ParsePort(p.Port()) - if err != nil { - panic(err) + portStr := p.Port() + if len(portStr) == 0 { + return 0 } - return port + + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := strconv.ParseUint(portStr, 10, 16) + return int(port) } -// Splits a port in the format of proto/port +// SplitProtoPort splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { parts := strings.Split(rawPort, "/") l := len(parts) @@ -87,8 +109,8 @@ func validateProto(proto string) bool { return false } -// We will receive port specs in the format of ip:public:private/proto and these need to be -// parsed in the internal types +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these into the internal types func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { var ( exposedPorts = make(map[Port]struct{}, len(ports)) @@ -108,19 +130,19 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, rawPort = fmt.Sprintf(":%s", rawPort) } - parts, err := parsers.PartParser(PortSpecTemplate, rawPort) + parts, err := parsers.PartParser(portSpecTemplate, rawPort) if err != nil { return nil, nil, err } var ( containerPort = parts["containerPort"] - rawIp = parts["ip"] + rawIP = parts["ip"] hostPort = parts["hostPort"] ) - if rawIp != "" && net.ParseIP(rawIp) == nil { - return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIp) + if rawIP != "" && net.ParseIP(rawIP) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) } if containerPort == "" { return nil, nil, fmt.Errorf("No port specified: %s", rawPort) @@ -152,13 +174,16 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, if len(hostPort) > 0 { hostPort = strconv.FormatUint(startHostPort+i, 10) } - port := NewPort(strings.ToLower(proto), containerPort) + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, nil, err + } if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } binding := PortBinding{ - HostIp: rawIp, + HostIP: rawIP, HostPort: hostPort, } bslice, exists := bindings[port] diff --git a/Godeps/_workspace/src/github.com/docker/docker/nat/nat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/nat_test.go similarity index 61% rename from Godeps/_workspace/src/github.com/docker/docker/nat/nat_test.go rename to
Godeps/_workspace/src/github.com/docker/docker/pkg/nat/nat_test.go index 376857fd..d9472cc7 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/nat/nat_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/nat_test.go @@ -42,7 +42,11 @@ func TestParsePort(t *testing.T) { } func TestPort(t *testing.T) { - p := NewPort("tcp", "1234") + p, err := NewPort("tcp", "1234") + + if err != nil { + t.Fatalf("tcp, 1234 had a parsing issue: %v", err) + } if string(p) != "1234/tcp" { t.Fatal("tcp, 1234 did not result in the string 1234/tcp") @@ -59,6 +63,11 @@ func TestPort(t *testing.T) { if p.Int() != 1234 { t.Fatal("port int value was not 1234") } + + p, err = NewPort("tcp", "asd1234") + if err == nil { + t.Fatal("tcp, asd1234 was supposed to fail") + } } func TestSplitProtoPort(t *testing.T) { @@ -124,8 +133,8 @@ func TestParsePortSpecs(t *testing.T) { t.Fatalf("%s should have exactly one binding", portspec) } - if bindings[0].HostIp != "" { - t.Fatalf("HostIp should not be set for %s", portspec) + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) } if bindings[0].HostPort != "" { @@ -154,8 +163,8 @@ func TestParsePortSpecs(t *testing.T) { t.Fatalf("%s should have exactly one binding", portspec) } - if bindings[0].HostIp != "" { - t.Fatalf("HostIp should not be set for %s", portspec) + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) } if bindings[0].HostPort != port { @@ -184,8 +193,8 @@ func TestParsePortSpecs(t *testing.T) { t.Fatalf("%s should have exactly one binding", portspec) } - if bindings[0].HostIp != "0.0.0.0" { - t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec) + if bindings[0].HostIP != "0.0.0.0" { + t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) } if bindings[0].HostPort != port { @@ -226,8 +235,8 @@ func TestParsePortSpecsWithRange(t *testing.T) { t.Fatalf("%s should have exactly one binding", portspec) } - if bindings[0].HostIp != "" { - t.Fatalf("HostIp should not be set for %s", portspec) + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) } if bindings[0].HostPort != "" { @@ -255,8 +264,8 @@ func TestParsePortSpecsWithRange(t *testing.T) { t.Fatalf("%s should have exactly one binding", portspec) } - if bindings[0].HostIp != "" { - t.Fatalf("HostIp should not be set for %s", portspec) + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) } if bindings[0].HostPort != port { @@ -280,7 +289,7 @@ func TestParsePortSpecsWithRange(t *testing.T) { for portspec, bindings := range bindingMap { _, port := SplitProtoPort(string(portspec)) - if len(bindings) != 1 || bindings[0].HostIp != "0.0.0.0" || bindings[0].HostPort != port { + if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { t.Fatalf("Expect single binding to port %s but found %s", port, bindings) } } @@ -291,3 +300,162 @@ func TestParsePortSpecsWithRange(t *testing.T) { t.Fatal("Received no error while trying to parse a hostname instead of ip") } } + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + 
t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + if len(ports) != 0 { + t.Logf("Expected nil got %d", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %d", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/nat/sort.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/sort.go similarity index 92% rename from Godeps/_workspace/src/github.com/docker/docker/nat/sort.go rename to Godeps/_workspace/src/github.com/docker/docker/pkg/nat/sort.go index fa584c17..0a9dd078 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/nat/sort.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/sort.go @@ -26,6 +26,9 @@ func (s *portSorter) Less(i, j int) bool { return s.by(ip, jp) } +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` func Sort(ports []Port, predicate func(i, j Port) bool) 
{ s := &portSorter{ports, predicate} sort.Sort(s) diff --git a/Godeps/_workspace/src/github.com/docker/docker/nat/sort_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/sort_test.go similarity index 92% rename from Godeps/_workspace/src/github.com/docker/docker/nat/sort_test.go rename to Godeps/_workspace/src/github.com/docker/docker/pkg/nat/sort_test.go index ba24cdbc..88ed9111 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/nat/sort_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/nat/sort_test.go @@ -59,10 +59,10 @@ func TestSortPortMap(t *testing.T) { }, Port("6379/tcp"): []PortBinding{ {}, - {HostIp: "0.0.0.0", HostPort: "32749"}, + {HostIP: "0.0.0.0", HostPort: "32749"}, }, Port("9999/tcp"): []PortBinding{ - {HostIp: "0.0.0.0", HostPort: "40000"}, + {HostIP: "0.0.0.0", HostPort: "40000"}, }, } @@ -77,7 +77,7 @@ func TestSortPortMap(t *testing.T) { t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) } if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ - {HostIp: "0.0.0.0", HostPort: "32749"}, + {HostIP: "0.0.0.0", HostPort: "32749"}, {}, }) { t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go index df5486d5..6c394f16 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go @@ -1,3 +1,5 @@ +// Package filters provides helper functions to parse and handle command line +// filters, used for example in docker ps or docker images commands. package filters import ( @@ -7,16 +9,22 @@ import ( "strings" ) +// Args stores filter arguments as map key:{array of values}. +// It contains an aggregation of the list of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a slice. +// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {'label': {'label1=1','label2=2'}, 'image.name': {'ubuntu'}} type Args map[string][]string -// Parse the argument to the filter flag. Like +// ParseFlag parses the argument to the filter flag. Like // // `docker ps -f 'created=today' -f 'image.name=ubuntu*'` // // If prev map is provided, then it is appended to, and returned. By default a new // map is created. func ParseFlag(arg string, prev Args) (Args, error) { - var filters Args = prev + filters := prev if prev == nil { filters = Args{} } @@ -25,7 +33,7 @@ } if !strings.Contains(arg, "=") { - return filters, ErrorBadFormat + return filters, ErrBadFormat } f := strings.SplitN(arg, "=", 2) @@ -36,9 +44,10 @@ return filters, nil } -var ErrorBadFormat = errors.New("bad format of filter (expected name=value)") +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") -// packs the Args into an string for easy transport from client to server +// ToParam packs the Args into a string for easy transport from client to server.
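Tying the renamed filters API together, a client might accumulate, serialize, and match filters roughly as in this sketch; the filter keys and values are invented for the example:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/parsers/filters"
    )

    func main() {
        // Accumulate -f style arguments into an Args map.
        args, err := filters.ParseFlag("label=env=prod", nil)
        if err != nil {
            panic(err)
        }
        args, _ = filters.ParseFlag("image.name=ubuntu*", args)

        // Pack for transport to the daemon, then unpack again.
        param, _ := filters.ToParam(args)
        back, _ := filters.FromParam(param)

        // Match treats each stored value as a (substring) pattern.
        fmt.Println(back.Match("image.name", "ubuntu-14.04")) // true
    }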
 func ToParam(a Args) (string, error) {
 	// this way we don't URL encode {}, just empty space
 	if len(a) == 0 {
@@ -52,7 +61,7 @@ func ToParam(a Args) (string, error) {
 	return string(buf), nil
 }
 
-// unpacks the filter Args
+// FromParam unpacks the filter Args.
 func FromParam(p string) (Args, error) {
 	args := Args{}
 	if len(p) == 0 {
@@ -64,6 +73,11 @@ func FromParam(p string) (Args, error) {
 	return args, nil
 }
 
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
+// field is 'label' and sources are {'label':{'label1=1','label2=2','label3=3'}}
+// it returns true.
 func (filters Args) MatchKVList(field string, sources map[string]string) bool {
 	fieldValues := filters[field]
@@ -96,6 +110,10 @@ outer:
 	return true
 }
 
+// Match returns true if the values for the specified field match the source string
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
 func (filters Args) Match(field, source string) bool {
 	fieldValues := filters[field]
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go
index a2483502..eb9fcef9 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go
@@ -30,33 +30,75 @@ func TestParseArgs(t *testing.T) {
 	}
 }
 
-func TestParam(t *testing.T) {
+func TestParseArgsEdgeCase(t *testing.T) {
+	var filters Args
+	args, err := ParseFlag("", filters)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if args == nil || len(args) != 0 {
+		t.Fatalf("Expected an empty Args (map), got %v", args)
+	}
+	if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat {
+		t.Fatalf("Expected ErrBadFormat, got %v", err)
+	}
+}
+
+func TestToParam(t *testing.T) {
 	a := Args{
 		"created":    []string{"today"},
 		"image.name": []string{"ubuntu*", "*untu"},
 	}
 
-	v, err := ToParam(a)
+	_, err := ToParam(a)
 	if err != nil {
 		t.Errorf("failed to marshal the filters: %s", err)
 	}
-	v1, err := FromParam(v)
-	if err != nil {
-		t.Errorf("%s", err)
+}
+
+func TestFromParam(t *testing.T) {
+	invalids := []string{
+		"anything",
+		"['a','list']",
+		"{'key': 'value'}",
+		`{"key": "value"}`,
 	}
-	for key, vals := range v1 {
-		if _, ok := a[key]; !ok {
-			t.Errorf("could not find key %s in original set", key)
+	valids := map[string]Args{
+		`{"key": ["value"]}`: {
+			"key": {"value"},
+		},
+		`{"key": ["value1", "value2"]}`: {
+			"key": {"value1", "value2"},
+		},
+		`{"key1": ["value1"], "key2": ["value2"]}`: {
+			"key1": {"value1"},
+			"key2": {"value2"},
+		},
+	}
+	for _, invalid := range invalids {
+		if _, err := FromParam(invalid); err == nil {
+			t.Fatalf("Expected an error with %v, got nothing", invalid)
 		}
-		sort.Strings(vals)
-		sort.Strings(a[key])
-		if len(vals) != len(a[key]) {
-			t.Errorf("value lengths ought to match")
-			continue
+	}
+	for json, expectedArgs := range valids {
+		args, err := FromParam(json)
+		if err != nil {
+			t.Fatal(err)
 		}
-		for i := range vals {
-			if vals[i] != a[key][i] {
-				t.Errorf("expected %s, but got %s", a[key][i], vals[i])
+		if len(args) != len(expectedArgs) {
+			t.Fatalf("Expected %v, got %v", expectedArgs, args)
+		}
+		for key, expectedValues := range expectedArgs {
+			values := args[key]
+			sort.Strings(values)
+			sort.Strings(expectedValues)
+			if len(values) != len(expectedValues) {
+				t.Fatalf("Expected %v, got %v", expectedArgs, args)
+			}
+			for index, expectedValue := range expectedValues {
+				if values[index] != expectedValue {
+					t.Fatalf("Expected %v, got %v", expectedArgs, args)
+				}
+			}
 		}
 	}
 }
@@ -76,3 +118,101 @@ func TestEmpty(t *testing.T) {
 		t.Errorf("these should both be empty sets")
 	}
 }
+
+func TestArgsMatchKVList(t *testing.T) {
+	// empty sources
+	args := Args{
+		"created": []string{"today"},
+	}
+	if args.MatchKVList("created", map[string]string{}) {
+		t.Fatalf("Expected false for (%v,created), got true", args)
+	}
+	// Not empty sources
+	sources := map[string]string{
+		"key1": "value1",
+		"key2": "value2",
+		"key3": "value3",
+	}
+	matches := map[*Args]string{
+		&Args{}: "field",
+		&Args{
+			"created": []string{"today"},
+			"labels":  []string{"key1"},
+		}: "labels",
+		&Args{
+			"created": []string{"today"},
+			"labels":  []string{"key1=value1"},
+		}: "labels",
+	}
+	differs := map[*Args]string{
+		&Args{
+			"created": []string{"today"},
+		}: "created",
+		&Args{
+			"created": []string{"today"},
+			"labels":  []string{"key4"},
+		}: "labels",
+		&Args{
+			"created": []string{"today"},
+			"labels":  []string{"key1=value3"},
+		}: "labels",
+	}
+	for args, field := range matches {
+		if args.MatchKVList(field, sources) != true {
+			t.Fatalf("Expected true for %v on %v, got false", sources, args)
+		}
+	}
+	for args, field := range differs {
+		if args.MatchKVList(field, sources) != false {
+			t.Fatalf("Expected false for %v on %v, got true", sources, args)
+		}
+	}
+}
+
+func TestArgsMatch(t *testing.T) {
+	source := "today"
+	matches := map[*Args]string{
+		&Args{}: "field",
+		&Args{
+			"created": []string{"today"},
+			"labels":  []string{"key1"},
+		}: "today",
+		&Args{
+			"created": []string{"to*"},
+		}: "created",
+		&Args{
+			"created": []string{"to(.*)"},
+		}: "created",
+		&Args{
+			"created": []string{"tod"},
+		}: "created",
+		&Args{
+			"created": []string{"anything", "to*"},
+		}: "created",
+	}
+	differs := map[*Args]string{
+		&Args{
+			"created": []string{"tomorrow"},
+		}: "created",
+		&Args{
+			"created": []string{"to(day"},
+		}: "created",
+		&Args{
+			"created": []string{"tom(.*)"},
+		}: "created",
+		&Args{
+			"created": []string{"today1"},
+			"labels":  []string{"today"},
+		}: "created",
+	}
+	for args, field := range matches {
+		if args.Match(field, source) != true {
+			t.Fatalf("Expected true for %v on %v, got false", source, args)
+		}
+	}
+	for args, field := range differs {
+		if args.Match(field, source) != false {
+			t.Fatalf("Expected false for %v on %v, got true", source, args)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go
index 5f793068..a21ba137 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go
@@ -1,5 +1,7 @@
 // +build !windows
 
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
 package kernel
 
 import (
@@ -8,20 +10,21 @@ import (
 	"fmt"
 )
 
-type KernelVersionInfo struct {
-	Kernel int
-	Major  int
-	Minor  int
-	Flavor string
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+	Kernel int    // Version of the kernel (e.g. 4.1.2-generic -> 4)
+	Major  int    // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+	Minor  int    // Minor part of the kernel version (e.g.
4.1.2-generic -> 2)
+	Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
 }
 
-func (k *KernelVersionInfo) String() string {
+func (k *VersionInfo) String() string {
 	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
 }
 
-// Compare two KernelVersionInfo struct.
+// CompareKernelVersion compares two kernel.VersionInfo structs.
 // Returns -1 if a < b, 0 if a == b, 1 if a > b
-func CompareKernelVersion(a, b *KernelVersionInfo) int {
+func CompareKernelVersion(a, b VersionInfo) int {
 	if a.Kernel < b.Kernel {
 		return -1
 	} else if a.Kernel > b.Kernel {
@@ -43,7 +46,8 @@ func CompareKernelVersion(a, b *KernelVersionInfo) int {
 	return 0
 }
 
-func GetKernelVersion() (*KernelVersionInfo, error) {
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
 	var (
 		err error
 	)
@@ -67,7 +71,8 @@ func GetKernelVersion() (*KernelVersionInfo, error) {
 	return ParseRelease(string(release))
 }
 
-func ParseRelease(release string) (*KernelVersionInfo, error) {
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) {
 	var (
 		kernel, major, minor, parsed int
 		flavor, partial              string
@@ -86,7 +91,7 @@ func ParseRelease(release string) (*KernelVersionInfo, error) {
 		flavor = partial
 	}
 
-	return &KernelVersionInfo{
+	return &VersionInfo{
 		Kernel: kernel,
 		Major:  major,
 		Minor:  minor,
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go
index e211a63b..6a2c2468 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go
@@ -1,17 +1,18 @@
 package kernel
 
 import (
+	"fmt"
 	"testing"
 )
 
-func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) {
+func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) {
 	var (
-		a *KernelVersionInfo
+		a *VersionInfo
 	)
 	a, _ = ParseRelease(release)
 
-	if r := CompareKernelVersion(a, b); r != result {
-		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+	if r := CompareKernelVersion(*a, *b); r != result {
+		t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result)
 	}
 	if a.Flavor != b.Flavor {
 		t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor)
@@ -19,15 +20,29 @@ func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, resu
 }
 
 func TestParseRelease(t *testing.T) {
-	assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
-	assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
-	assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
+	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+	assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
+	assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
+	assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
+	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1)
+	// Errors
+	invalids := []string{
+		"3",
+		"a",
+		"a.a",
+		"a.a.a-a",
+	}
+	for _, invalid := range invalids {
+		expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
+		if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
+			t.Fatalf("Expected error %q, got %v", expectedMessage, err)
+		}
+	}
 }
 
-func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
+func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) {
 	if r := CompareKernelVersion(a, b); r != result {
 		t.Fatalf("Unexpected kernel version comparison result.
Found %d, expected %d", r, result) } @@ -35,27 +50,43 @@ func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { func TestCompareKernelVersion(t *testing.T) { assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, 1) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 1) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, -1) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go index 399d63e5..85ca250c 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -6,18 +6,20 @@ import ( "unsafe" ) -type KernelVersionInfo struct { - kvi string - major int - minor int - build int +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) } -func (k *KernelVersionInfo) String() string { +func (k *VersionInfo) String() string { return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) } -func GetKernelVersion() (*KernelVersionInfo, error) { +// GetKernelVersion gets the current kernel version. 
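+//
+// Editor's sketch (not part of the upstream patch): on the unix build of this
+// package, where CompareKernelVersion and the exported VersionInfo fields are
+// available, a minimum-version check would look like:
+//
+//	v, err := GetKernelVersion()
+//	if err == nil && CompareKernelVersion(*v, VersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 {
+//		// kernel is older than 3.10.0
+//	}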
+func GetKernelVersion() (*VersionInfo, error) {
 	var (
 		h         syscall.Handle
@@ -25,7 +27,7 @@ func GetKernelVersion() (*KernelVersionInfo, error) {
 		err       error
 	)
 
-	KVI := &KernelVersionInfo{"Unknown", 0, 0, 0}
+	KVI := &VersionInfo{"Unknown", 0, 0, 0}
 
 	if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
 		syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go
index 8ca814c1..7d12fcbd 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go
@@ -4,6 +4,9 @@ import (
 	"syscall"
 )
 
+// Utsname represents the system name structure.
+// It is a passthrough for syscall.Utsname in order to make it portable to
+// other platforms where it is not available.
 type Utsname syscall.Utsname
 
 func uname() (*syscall.Utsname, error) {
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go
index 00c54225..79c66b32 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go
@@ -6,6 +6,9 @@ import (
 	"errors"
 )
 
+// Utsname represents the system name structure.
+// It is defined here to make it portable as it is available on linux but not
+// on windows.
 type Utsname struct {
 	Release [65]byte
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go
new file mode 100644
index 00000000..0589cf2a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go
@@ -0,0 +1,18 @@
+package operatingsystem
+
+import (
+	"errors"
+)
+
+// GetOperatingSystem gets the name of the current operating system.
+func GetOperatingSystem() (string, error) {
+	// TODO: Implement OS detection
+	return "", errors.New("Cannot detect OS version")
+}
+
+// IsContainerized returns true if we are running inside a container.
+// No-op on FreeBSD, always returns false.
+func IsContainerized() (bool, error) {
+	// TODO: Implement jail detection
+	return false, errors.New("Cannot detect if we are in container")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go
index af185f9f..ca8ea8f0 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go
@@ -1,3 +1,5 @@
+// Package operatingsystem provides helper functions to get the operating
+// system name for different platforms.
 package operatingsystem
 
 import (
@@ -14,6 +16,7 @@ var (
 	etcOsRelease = "/etc/os-release"
 )
 
+// GetOperatingSystem gets the name of the current operating system.
 func GetOperatingSystem() (string, error) {
 	b, err := ioutil.ReadFile(etcOsRelease)
 	if err != nil {
@@ -26,6 +29,7 @@ func GetOperatingSystem() (string, error) {
 	return "", errors.New("PRETTY_NAME not found")
 }
 
+// IsContainerized returns true if we are running inside a container.
 func IsContainerized() (bool, error) {
 	b, err := ioutil.ReadFile(proc1Cgroup)
 	if err != nil {
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go
index c843c6f8..3c86b6af 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go
@@ -8,6 +8,7 @@ import (
 // See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c
 // for a similar sample
 
+// GetOperatingSystem gets the name of the current operating system.
 func GetOperatingSystem() (string, error) {
 	var (
 		h syscall.Handle
@@ -41,7 +42,8 @@ func GetOperatingSystem() (string, error) {
 	return ret, nil
 }
 
-// No-op on Windows
+// IsContainerized returns true if we are running inside a container.
+// No-op on Windows, always returns false.
 func IsContainerized() (bool, error) {
 	return false, nil
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go
index 32d87734..e326a119 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go
@@ -1,12 +1,19 @@
+// Package parsers provides helper functions to parse and validate different
+// types of strings: hosts, unix addresses, tcp addresses, filters, kernel and
+// operating system versions.
 package parsers
 
 import (
 	"fmt"
+	"net/url"
+	"path"
 	"runtime"
 	"strconv"
 	"strings"
 )
 
+// ParseHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, it will use the defaultTCPAddr or defaultUnixAddr
 // FIXME: Change this not to receive default value as parameter
 func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
 	addr = strings.TrimSpace(addr)
@@ -15,7 +22,7 @@ func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
 		addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
 	} else {
 			// Note - defaultTCPAddr already includes tcp:// prefix
-			addr = fmt.Sprintf("%s", defaultTCPAddr)
+			addr = defaultTCPAddr
 		}
 	}
 	addrParts := strings.Split(addr, "://")
@@ -35,6 +42,10 @@ func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
 	}
 }
 
+// ParseUnixAddr parses and validates that the specified address is a valid UNIX
+// socket address. It returns a formatted UNIX socket address, either using the
+// address parsed from addr, or the contents of defaultAddr if addr is a blank
+// string.
 func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
 	addr = strings.TrimPrefix(addr, "unix://")
 	if strings.Contains(addr, "://") {
@@ -46,13 +57,20 @@ func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
 	return fmt.Sprintf("unix://%s", addr), nil
 }
 
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from addr, or the contents of defaultAddr if addr is a blank string.
 func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
 	addr = strings.TrimPrefix(addr, "tcp://")
 	if strings.Contains(addr, "://") || addr == "" {
 		return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
 	}
 
-	hostParts := strings.Split(addr, ":")
+	u, err := url.Parse("tcp://" + addr)
+	if err != nil {
+		return "", err
+	}
+	hostParts := strings.Split(u.Host, ":")
 	if len(hostParts) != 2 {
 		return "", fmt.Errorf("Invalid bind address format: %s", addr)
 	}
@@ -65,10 +83,10 @@ func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
 	if err != nil && p == 0 {
 		return "", fmt.Errorf("Invalid bind address format: %s", addr)
 	}
-	return fmt.Sprintf("tcp://%s:%d", host, p), nil
+	return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
 }
 
-// Get a repos name and returns the right reposName + tag|digest
+// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest
 // The tag can be confusing because of a port in a repository name.
 // Ex: localhost.localdomain:5000/samalba/hipache:latest
 // Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
@@ -88,6 +106,8 @@ func ParseRepositoryTag(repos string) (string, string) {
 	return repos, ""
 }
 
+// PartParser parses and validates the specified string (data) using the specified template
+// e.g. ip:public:private -> 192.168.0.1:80:8000
 func PartParser(template, data string) (map[string]string, error) {
 	// ip:public:private
 	var (
@@ -109,6 +129,7 @@ func PartParser(template, data string) (map[string]string, error) {
 	return out, nil
 }
 
+// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
 func ParseKeyValueOpt(opt string) (string, string, error) {
 	parts := strings.SplitN(opt, "=", 2)
 	if len(parts) != 2 {
@@ -117,6 +138,7 @@ func ParseKeyValueOpt(opt string) (string, string, error) {
 	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
 }
 
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
 func ParsePortRange(ports string) (uint64, uint64, error) {
 	if ports == "" {
 		return 0, 0, fmt.Errorf("Empty string specified for ports.")
@@ -142,6 +164,7 @@ func ParsePortRange(ports string) (uint64, uint64, error) {
 	return start, end, nil
 }
 
+// ParseLink parses and validates the specified string as a link format (name:alias)
 func ParseLink(val string) (string, string, error) {
 	if val == "" {
 		return "", "", fmt.Errorf("empty string specified for links")
@@ -153,5 +176,12 @@ func ParseLink(val string) (string, string, error) {
 	if len(arr) == 1 {
 		return val, val, nil
 	}
+	// This is kept because we can actually get a HostConfig with links
+	// from an already created container and the format is not `foo:bar`
+	// but `/foo:/c1/bar`
+	if strings.HasPrefix(arr[0], "/") {
+		_, alias := path.Split(arr[1])
+		return arr[0][1:], alias, nil
+	}
 	return arr[0], arr[1], nil
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go
index 89f4ae02..903c66af 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go
@@ -7,38 +7,45 @@ import (
 
 func TestParseHost(t *testing.T) {
 	var (
-		defaultHttpHost = "127.0.0.1"
+		defaultHTTPHost = "127.0.0.1"
defaultUnix = "/var/run/docker.sock" ) - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { - t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp://": "Invalid proto, expected tcp: ", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { - t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) + valids := map[string]string{ + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "": "unix:///var/run/docker.sock", + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix:///var/run/docker.sock", + "fd://": "fd://", + "fd://something": "fd://something", } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { - t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) + for invalidAddr, expectedError := range invalids { + if addr, err := ParseHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { - t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) + for validAddr, expectedAddr := range valids { + if addr, err := ParseHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr) + } } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { - t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { - t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { - t.Errorf("udp protocol address expected error return, but err == nil. 
Got %s", addr) +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) } } @@ -73,6 +80,9 @@ func TestParseRepositoryTag(t *testing.T) { } func TestParsePortMapping(t *testing.T) { + if _, err := PartParser("ip:public:private", "192.168.1.1:80"); err == nil { + t.Fatalf("Expected an error, got %v", err) + } data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") if err != nil { t.Fatal(err) @@ -92,12 +102,55 @@ func TestParsePortMapping(t *testing.T) { } } +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + func TestParsePortRange(t *testing.T) { if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) } } +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." { + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + func TestParsePortRangeIncorrectRange(t *testing.T) { if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 00000000..76e84f9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. 
+	BufioReader32KPool *BufioReaderPool
+	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+	BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+	pool sync.Pool
+}
+
+func init() {
+	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+	pool := sync.Pool{
+		New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+	}
+	return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+	buf := bufPool.pool.Get().(*bufio.Reader)
+	buf.Reset(r)
+	return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := BufioReader32KPool.Get(src)
+	written, err = io.Copy(dst, buf)
+	BufioReader32KPool.Put(buf)
+	return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+	return ioutils.NewReadCloserWrapper(r, func() error {
+		if readCloser, ok := r.(io.ReadCloser); ok {
+			readCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+	pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	pool := sync.Pool{
+		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+	}
+	return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
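+//
+// Editor's sketch (not part of the upstream patch): the intended Get/Put
+// discipline for these pools, where dst is any io.Writer and data a []byte:
+//
+//	w := BufioWriter32KPool.Get(dst)
+//	defer BufioWriter32KPool.Put(w)
+//	if _, err := w.Write(data); err == nil {
+//		w.Flush()
+//	}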
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go
new file mode 100644
index 00000000..78689800
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_test.go
@@ -0,0 +1,162 @@
+package pools
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"strings"
+	"testing"
+)
+
+func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+	reader := BufioReader32KPool.Get(nil)
+	if reader == nil {
+		t.Fatalf("BufioReaderPool should have created a bufio.Reader but did not.")
+	}
+}
+
+func TestBufioReaderPoolPutAndGet(t *testing.T) {
+	sr := bufio.NewReader(strings.NewReader("foobar"))
+	reader := BufioReader32KPool.Get(sr)
+	if reader == nil {
+		t.Fatalf("BufioReaderPool should not return a nil reader.")
+	}
+	// verify the first 3 bytes
+	buf1 := make([]byte, 3)
+	_, err := reader.Read(buf1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf1); actual != "foo" {
+		t.Fatalf("The first 3 letters should have been 'foo' but were %v", actual)
+	}
+	BufioReader32KPool.Put(reader)
+	// Try to read the next 3 bytes
+	_, err = sr.Read(make([]byte, 3))
+	if err == nil || err != io.EOF {
+		t.Fatalf("The buffer should have been empty, issuing an EOF error.")
+	}
+}
+
+type simpleReaderCloser struct {
+	io.Reader
+	closed bool
+}
+
+func (r *simpleReaderCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) {
+	br := bufio.NewReader(strings.NewReader(""))
+	sr := &simpleReaderCloser{
+		Reader: strings.NewReader("foobar"),
+		closed: false,
+	}
+	reader := BufioReader32KPool.NewReadCloserWrapper(br, sr)
+	if reader == nil {
+		t.Fatalf("NewReadCloserWrapper should not return a nil reader.")
+	}
+	// Verify the content of reader
+	buf := make([]byte, 3)
+	_, err := reader.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf); actual != "foo" {
+		t.Fatalf("The first 3 letters should have been 'foo' but were %v", actual)
+	}
+	reader.Close()
+	// Read 3 more bytes "bar"
+	_, err = reader.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf); actual != "bar" {
+		t.Fatalf("The next 3 letters should have been 'bar' but were %v", actual)
+	}
+	if !sr.closed {
+		t.Fatalf("The ReadCloser should have been closed, it is not.")
+	}
+}
+
+func TestBufioWriterPoolGetWithNoWriterShouldCreateOne(t *testing.T) {
+	writer := BufioWriter32KPool.Get(nil)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should have created a bufio.Writer but did not.")
+	}
+}
+
+func TestBufioWriterPoolPutAndGet(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	writer := BufioWriter32KPool.Get(bw)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	// Make sure we flush all the way down (through both the pooled writer and bw)
+	writer.Flush()
+	bw.Flush()
+	if len(buf.Bytes()) != 6 {
+		t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
+	}
+	// Reset the buffer
+	buf.Reset()
+	BufioWriter32KPool.Put(writer)
+	// Try to write something
+	written, err = writer.Write([]byte("barfoo"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// If we now try to flush it, it should panic (the writer is nil)
+	// recover it
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("Trying to flush the writer should have panicked, but did not.")
+		}
+	}()
+	writer.Flush()
+}
+
+type simpleWriterCloser struct {
+	io.Writer
+	closed bool
+}
+
+func (r *simpleWriterCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	sw := &simpleWriterCloser{
+		Writer: new(bytes.Buffer),
+		closed: false,
+	}
+	bw.Flush()
+	writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
+	if writer == nil {
+		t.Fatalf("NewWriteCloserWrapper should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	writer.Close()
+	if !sw.closed {
+		t.Fatalf("The WriteCloser should have been closed, it is not.")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 00000000..dd52b908
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine,
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go
new file mode 100644
index 00000000..865f5f39
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go
@@ -0,0 +1,61 @@
+package random
+
+import (
+	"io"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// Rand is a global *rand.Rand instance, which is initialized with a NewSource() source.
+var Rand = rand.New(NewSource())
+
+// Reader is a global, shared instance of a pseudorandom bytes generator.
+// It doesn't consume entropy.
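+//
+// Editor's sketch (not part of the upstream patch): because Reader implements
+// io.Reader, it composes with the standard helpers, e.g.
+//
+//	buf := make([]byte, 16)
+//	_, _ = io.ReadFull(Reader, buf) // this reader never returns an error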
+var Reader io.Reader = &reader{rnd: Rand}
+
+// copypaste from standard math/rand
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// NewSource returns a math/rand.Source safe for concurrent use, initialized
+// with the current unix-nano timestamp.
+func NewSource() rand.Source {
+	return &lockedSource{
+		src: rand.NewSource(time.Now().UnixNano()),
+	}
+}
+
+type reader struct {
+	rnd *rand.Rand
+}
+
+func (r *reader) Read(b []byte) (int, error) {
+	i := 0
+	for {
+		val := r.rnd.Int63()
+		for val > 0 {
+			b[i] = byte(val)
+			i++
+			if i == len(b) {
+				return i, nil
+			}
+			val >>= 8
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random_test.go
new file mode 100644
index 00000000..cf405f78
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random_test.go
@@ -0,0 +1,22 @@
+package random
+
+import (
+	"math/rand"
+	"sync"
+	"testing"
+)
+
+// for go test -v -race
+func TestConcurrency(t *testing.T) {
+	rnd := rand.New(NewSource())
+	var wg sync.WaitGroup
+
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			rnd.Int63()
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go
new file mode 100644
index 00000000..c7f797a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go
@@ -0,0 +1,23 @@
+// +build freebsd
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path set to the current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go
index 8dc3f3a4..3c3a73a9 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go
@@ -7,6 +7,16 @@ import (
 	"syscall"
 )
 
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd which has Path set to the current binary, and also
+// sets SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
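+//
+// Editor's sketch (not part of the upstream patch): the usual pattern is to
+// register an initializer under a name with this package's Register helper
+// (not shown in this hunk) and then re-invoke the current binary under that
+// name; "my-initializer" here is a hypothetical registered name:
+//
+//	cmd := Command("my-initializer", "arg1") // child runs the registered initializer
+//	cmd.Stdout = os.Stdout
+//	if err := cmd.Run(); err != nil {
+//		log.Fatal(err)
+//	}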
 func Command(args ...string) *exec.Cmd {
 	return &exec.Cmd{
 		Path: Self(),
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go
index 4adcd8f1..ad4ea38e 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux,!windows,!freebsd
 
 package reexec
 
@@ -6,6 +6,7 @@ import (
 	"os/exec"
 )
 
+// Command is unsupported on operating systems apart from Linux, Windows, and FreeBSD.
 func Command(args ...string) *exec.Cmd {
 	return nil
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go
index 124d42fc..8d65e0ae 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go
@@ -6,6 +6,15 @@ import (
 	"os/exec"
 )
 
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path set to the current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
 func Command(args ...string) *exec.Cmd {
 	return &exec.Cmd{
 		Path: Self(),
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go
index a5f01a26..20491e05 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go
@@ -30,8 +30,7 @@ func Init() bool {
 	return false
 }
 
-// Self returns the path to the current processes binary
-func Self() string {
+func naiveSelf() string {
 	name := os.Args[0]
 	if filepath.Base(name) == name {
 		if lp, err := exec.LookPath(name); err == nil {
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 00000000..684b4d4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,168 @@
+package stdcopy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+)
+
+const (
+	StdWriterPrefixLen = 8
+	StdWriterFdIndex   = 0
+	StdWriterSizeIndex = 4
+)
+
+type StdType [StdWriterPrefixLen]byte
+
+var (
+	Stdin  StdType = StdType{0: 0}
+	Stdout StdType = StdType{0: 1}
+	Stderr StdType = StdType{0: 2}
+)
+
+type StdWriter struct {
+	io.Writer
+	prefix  StdType
+	sizeBuf []byte
+}
+
+func (w *StdWriter) Write(buf []byte) (n int, err error) {
+	var n1, n2 int
+	if w == nil || w.Writer == nil {
+		return 0, errors.New("Writer not instantiated")
+	}
+	binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
+	n1, err = w.Writer.Write(w.prefix[:])
+	if err != nil {
+		n = n1 - StdWriterPrefixLen
+	} else {
+		n2, err = w.Writer.Write(buf)
+		n = n1 + n2 - StdWriterPrefixLen
+	}
+	if n < 0 {
+		n = 0
+	}
+	return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
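+//
+// Editor's note (not part of the upstream patch): concretely, the Write method
+// above prepends an 8-byte header to every frame, laid out as
+//
+//	prefix[0]   stream id (0: stdin, 1: stdout, 2: stderr)
+//	prefix[1:4] unused padding
+//	prefix[4:8] frame length as a big-endian uint32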
+// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) *StdWriter { + return &StdWriter{ + Writer: w, + prefix: t, + sizeBuf: make([]byte, 4), + } +} + +var ErrInvalidStdHeader = errors.New("Unrecognized input header") + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, 32*1024+StdWriterPrefixLen+1) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < StdWriterPrefixLen { + logrus.Debugf("Corrupted prefix: %v", buf[:nr]) + return written, nil + } + break + } + if er != nil { + logrus.Debugf("Error reading header: %s", er) + return 0, er + } + } + + // Check the first byte to know where to write + switch buf[StdWriterFdIndex] { + case 0: + fallthrough + case 1: + // Write on stdout + out = dstout + case 2: + // Write on stderr + out = dsterr + default: + logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) + return 0, ErrInvalidStdHeader + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + logrus.Debugf("framesize: %d", frameSize) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+StdWriterPrefixLen > bufLen { + logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) 
+			bufLen = len(buf)
+		}
+
+		// While the amount of bytes read is less than the size of the frame + header, we keep reading
+		for nr < frameSize+StdWriterPrefixLen {
+			var nr2 int
+			nr2, er = src.Read(buf[nr:])
+			nr += nr2
+			if er == io.EOF {
+				if nr < frameSize+StdWriterPrefixLen {
+					logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+					return written, nil
+				}
+				break
+			}
+			if er != nil {
+				logrus.Debugf("Error reading frame: %s", er)
+				return 0, er
+			}
+		}
+
+		// Write the retrieved frame (without header)
+		nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
+		if ew != nil {
+			logrus.Debugf("Error writing frame: %s", ew)
+			return 0, ew
+		}
+		// If the frame has not been fully written: error
+		if nw != frameSize {
+			logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+			return 0, io.ErrShortWrite
+		}
+		written += int64(nw)
+
+		// Move the rest of the buffer to the beginning
+		copy(buf, buf[frameSize+StdWriterPrefixLen:])
+		// Move the index
+		nr -= frameSize + StdWriterPrefixLen
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go
new file mode 100644
index 00000000..a9fd73a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go
@@ -0,0 +1,85 @@
+package stdcopy
+
+import (
+	"bytes"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestNewStdWriter(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	if writer == nil {
+		t.Fatalf("NewStdWriter should not return nil for a valid writer and StdType.")
+	}
+}
+
+func TestWriteWithUninitializedStdWriter(t *testing.T) {
+	writer := StdWriter{
+		Writer:  nil,
+		prefix:  Stdout,
+		sizeBuf: make([]byte, 4),
+	}
+	n, err := writer.Write([]byte("Something here"))
+	if n != 0 || err == nil {
+		t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter")
+	}
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	n, err := writer.Write(nil)
+	if err != nil {
+		t.Fatalf("Shouldn't have failed when given no data")
+	}
+	if n > 0 {
+		t.Fatalf("Write should have written 0 bytes, but has written %d", n)
+	}
+}
+
+func TestWrite(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test StdWrite.Write")
+	n, err := writer.Write(data)
+	if err != nil {
+		t.Fatalf("Error while writing with StdWriter")
+	}
+	if n != len(data) {
+		t.Fatalf("Write should have written %d bytes but wrote %d.", len(data), n)
+	}
+}
+
+func TestStdCopyWithInvalidInputHeader(t *testing.T) {
+	dstOut := NewStdWriter(ioutil.Discard, Stdout)
+	dstErr := NewStdWriter(ioutil.Discard, Stderr)
+	src := strings.NewReader("Invalid input")
+	_, err := StdCopy(dstOut, dstErr, src)
+	if err == nil {
+		t.Fatal("StdCopy with invalid input header should fail.")
+	}
+}
+
+func TestStdCopyWithCorruptedPrefix(t *testing.T) {
+	data := []byte{0x01, 0x02, 0x03}
+	src := bytes.NewReader(data)
+	written, err := StdCopy(nil, nil, src)
+	if err != nil {
+		t.Fatalf("StdCopy should not return an error with a corrupted prefix.")
+	}
+	if written != 0 {
+		t.Fatalf("StdCopy should have written 0, but has written %d", written)
+	}
+}
+
+func BenchmarkWrite(b *testing.B) {
+	w := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test line for testing stdwriter performance\n")
+	data = bytes.Repeat(data, 100)
+	b.SetBytes(int64(len(data)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := w.Write(data); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md
new file mode 100644
index 00000000..37a5098f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go
new file mode 100644
index 00000000..ab1f9d47
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go
@@ -0,0 +1,67 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+	"regexp"
+	"strconv"
+
+	"github.com/docker/docker/pkg/random"
+)
+
+const shortLen = 12
+
+var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+	return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length Id.
+func TruncateID(id string) string {
+	trimTo := shortLen
+	if len(id) < shortLen {
+		trimTo = len(id)
+	}
+	return id[:trimTo]
+}
+
+func generateID(crypto bool) string {
+	b := make([]byte, 32)
+	var r io.Reader = random.Reader
+	if crypto {
+		r = rand.Reader
+	}
+	for {
+		if _, err := io.ReadFull(r, b); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		id := hex.EncodeToString(b)
+		// if we try to parse the truncated form as an int and we don't have
+		// an error then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+			continue
+		}
+		return id
+	}
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+	return generateID(true)
+
+}
+
+// GenerateNonCryptoID generates a unique id without using cryptographically
+// secure sources of randomness.
+// It helps you to save entropy.
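+//
+// Editor's sketch (not part of the upstream patch): how the helpers in this
+// package compose:
+//
+//	id := GenerateRandomID() // 64 hex characters
+//	short := TruncateID(id)  // first 12 characters
+//	ok := IsShortID(short)   // true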
+func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid_test.go new file mode 100644 index 00000000..bcb13654 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid_test.go @@ -0,0 +1,56 @@ +package stringid + +import ( + "strings" + "testing" +) + +func TestGenerateRandomID(t *testing.T) { + id := GenerateRandomID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestShortenId(t *testing.T) { + id := GenerateRandomID() + truncID := TruncateID(id) + if len(truncID) != 12 { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdEmpty(t *testing.T) { + id := "" + truncID := TruncateID(id) + if len(truncID) > len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdInvalid(t *testing.T) { + id := "1234" + truncID := TruncateID(id) + if len(truncID) != len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestIsShortIDNonHex(t *testing.T) { + id := "some non-hex value" + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} + +func TestIsShortIDNotCorrectSize(t *testing.T) { + id := strings.Repeat("a", shortLen+1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } + id = strings.Repeat("a", shortLen-1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000..9e4bd4db --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000..ac74d8f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md
new file mode 100644
index 00000000..0d1dbb70
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md
@@ -0,0 +1,5 @@
+Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go
new file mode 100644
index 00000000..b4bdff24
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go
@@ -0,0 +1,131 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path
+func FollowSymlinkInScope(path, root string) (string, error) {
+	path, err := filepath.Abs(path)
+	if err != nil {
+		return "", err
+	}
+	root, err = filepath.Abs(root)
+	if err != nil {
+		return "", err
+	}
+	return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one subsequently makes /foo/bar a symlink to /baz, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
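+//
+// Reviewer sketch (not part of the vendored source), with a hypothetical
+// scope directory, showing the exported wrapper in use:
+//
+//	resolved, err := symlink.FollowSymlinkInScope("/scope/a/b", "/scope")
+//	if err != nil {
+//		// handle error
+//	}
+//	// resolved is guaranteed to remain under "/scope"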
+func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if cleanP == string(filepath.Separator) { + // never Lstat "/" itself + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p + string(filepath.Separator)) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." 
for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go new file mode 100644 index 00000000..89209484 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go @@ -0,0 +1,402 @@ +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + 
defer os.RemoveAll(tmpdir)
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
+		t.Fatal(err)
+	}
+	// avoid allowing symlink e to lead us to ../b
+	// normalize to "testdata/fs/a"
+	if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
+		t.Fatal(err)
+	}
+	// avoid letting symlink f lead us out of the "testdata" scope
+	// we don't normalize because symlink f is in scope and there is no
+	// information leak
+	if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
+		t.Fatal(err)
+	}
+	// avoid letting symlink f lead us out of the "testdata/fs" scope
+	// we don't normalize because symlink f is in scope and there is no
+	// information leak
+	if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	// avoid letting symlink g (pointed at by symlink h) take us out of scope
+	// TODO: we should probably normalize to scope here because ../[....]/root
+	// is out of scope and we leak information
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "testdata/fs/b/h", target: "../g"},
+		{path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkBreakoutPath(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	// avoid letting symlink -> ../directory/file escape from scope
+	// normalize to "testdata/fs/j"
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkToRoot(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	// make sure we don't allow escaping to /
+	// normalize to dir
+	if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkSlashDotdot(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	tmpdir = filepath.Join(tmpdir, "dir", "subdir")
+
+	// make sure we don't allow escaping to /
+	// normalize to dir
+	if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+		t.Fatal(err)
+	}
+} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil 
{ + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 00000000..63045186 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,9 @@ +package system + +import ( + "errors" +) + +var ( + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 00000000..23f7c618 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. 
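+//
+// Reviewer sketch (not part of the vendored source): a typical caller
+// creates a named manual-reset event and signals it:
+//
+//	h, err := system.CreateEvent(nil, true, false, "Global\\example-event")
+//	if err == nil {
+//		_ = system.SetEvent(h)
+//	}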
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	EVENT_ALL_ACCESS    = 0x1F0003
+	EVENT_MODIFY_STATUS = 0x0002
+)
+
+var (
+	procCreateEvent = modkernel32.NewProc("CreateEventW")
+	procOpenEvent   = modkernel32.NewProc("OpenEventW")
+	procSetEvent    = modkernel32.NewProc("SetEvent")
+	procResetEvent  = modkernel32.NewProc("ResetEvent")
+	procPulseEvent  = modkernel32.NewProc("PulseEvent")
+)
+
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+	namep, _ := syscall.UTF16PtrFromString(name)
+	var _p1 uint32 = 0
+	if manualReset {
+		_p1 = 1
+	}
+	var _p2 uint32 = 0
+	if initialState {
+		_p2 = 1
+	}
+	r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+	use(unsafe.Pointer(namep))
+	handle = syscall.Handle(r0)
+	if handle == syscall.InvalidHandle {
+		err = e1
+	}
+	return
+}
+
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+	namep, _ := syscall.UTF16PtrFromString(name)
+	var _p1 uint32 = 0
+	if inheritHandle {
+		_p1 = 1
+	}
+	r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+	use(unsafe.Pointer(namep))
+	handle = syscall.Handle(r0)
+	if handle == syscall.InvalidHandle {
+		err = e1
+	}
+	return
+}
+
+func SetEvent(handle syscall.Handle) (err error) {
+	return setResetPulse(handle, procSetEvent)
+}
+
+func ResetEvent(handle syscall.Handle) (err error) {
+	return setResetPulse(handle, procResetEvent)
+}
+
+func PulseEvent(handle syscall.Handle) (err error) {
+	return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
+	r0, _, _ := proc.Call(uintptr(handle))
+	if r0 != 0 {
+		err = syscall.Errno(r0)
+	}
+	return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive without the GC freeing it while still needed
+func use(p unsafe.Pointer) {
+	temp = p
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 00000000..e1f70e8d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+	"os"
+)
+
+func MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 00000000..90b50060
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"regexp"
+	"syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
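+	// Reviewer note (not part of the vendored source): os.Stat follows
+	// symlinks, so an existing directory reached through a symlink also
+	// takes this fast path.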
dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = MkdirAll(path[0:j-1], perm)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke Mkdir and use its result.
+	err = os.Mkdir(path, perm)
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 00000000..d0e43b37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*Stat_t, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Lstat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
new file mode 100644
index 00000000..6bac492e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
@@ -0,0 +1,28 @@
+package system
+
+import (
+	"os"
+	"testing"
+)
+
+// TestLstat tests Lstat for existing and non-existing files
+func TestLstat(t *testing.T) {
+	file, invalid, _, dir := prepareFiles(t)
+	defer os.RemoveAll(dir)
+
+	statFile, err := Lstat(file)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if statFile == nil {
+		t.Fatal("returned empty stat for existing file")
+	}
+
+	statInvalid, err := Lstat(invalid)
+	if err == nil {
+		t.Fatal("did not return error for non-existing file")
+	}
+	if statInvalid != nil {
+		t.Fatal("returned non-nil stat for non-existing file")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 00000000..eee1be26
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,29 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+)
+
+// Some explanation for my own sanity, and hopefully maintainers in the
+// future.
+//
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS-specific module.
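+//
+// Reviewer sketch (not part of the vendored source) of a call on Windows:
+//
+//	st, err := system.Lstat(`C:\some\file`)
+//	if err == nil {
+//		_ = st.Size()
+//	}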
+
+func Lstat(path string) (*Stat_t, error) {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Stat_t{
+		name:    fi.Name(),
+		size:    fi.Size(),
+		mode:    fi.Mode(),
+		modTime: fi.ModTime(),
+		isDir:   fi.IsDir()}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 00000000..3b6e947e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+	// kernel binary code).
+	MemTotal int64
+
+	// Amount of free memory.
+	MemFree int64
+
+	// Total amount of swap space available.
+	SwapTotal int64
+
+	// Amount of swap space that is currently unused.
+	SwapFree int64
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 00000000..e2ca1400
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,71 @@
+package system
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/pkg/units"
+)
+
+var (
+	ErrMalformed = errors.New("malformed file")
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	file, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+//
+// Throws an error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+	meminfo := &MemInfo{}
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		// Expected format: ["MemTotal:", "1234", "kB"]
+		parts := strings.Fields(scanner.Text())
+
+		// Sanity checks: Skip malformed entries.
+		if len(parts) < 3 || parts[2] != "kB" {
+			continue
+		}
+
+		// Convert to bytes.
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			continue
+		}
+		bytes := int64(size) * units.KiB
+
+		switch parts[0] {
+		case "MemTotal:":
+			meminfo.MemTotal = bytes
+		case "MemFree:":
+			meminfo.MemFree = bytes
+		case "SwapTotal:":
+			meminfo.SwapTotal = bytes
+		case "SwapFree:":
+			meminfo.SwapFree = bytes
+		}
+
+	}
+
+	// Handle errors that may have occurred during the reading of the file.
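+	// (Reviewer note, not part of the vendored source: scanner.Err reports
+	// any error other than io.EOF, so a clean end of input does not fail
+	// the parse.)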
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go new file mode 100644 index 00000000..10ddf796 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go @@ -0,0 +1,38 @@ +package system + +import ( + "strings" + "testing" + + "github.com/docker/docker/pkg/units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000..604d3387 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package system + +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 00000000..d4664259 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
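+//
+// Reviewer note (not part of the vendored source): GlobalMemoryStatusEx
+// requires dwLength to be initialized to the size of the MEMORYSTATUSEX
+// structure (64 bytes) before the call, which the struct literal below does.
+// Note also that a failed call (r1 == 0) currently yields an empty MemInfo
+// with a nil error, as in the vendored upstream source.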
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 00000000..26617eb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,20 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 00000000..1811542a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package system + +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go new file mode 100644 index 00000000..e2ecfe52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go @@ -0,0 +1,46 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Stat_t type contains status of a file. 
It contains metadata
+// like permission, owner, group, size, etc. about a file
+type Stat_t struct {
+	mode uint32
+	uid  uint32
+	gid  uint32
+	rdev uint64
+	size int64
+	mtim syscall.Timespec
+}
+
+func (s Stat_t) Mode() uint32 {
+	return s.mode
+}
+
+func (s Stat_t) Uid() uint32 {
+	return s.uid
+}
+
+func (s Stat_t) Gid() uint32 {
+	return s.gid
+}
+
+func (s Stat_t) Rdev() uint64 {
+	return s.rdev
+}
+
+func (s Stat_t) Size() int64 {
+	return s.size
+}
+
+func (s Stat_t) Mtim() syscall.Timespec {
+	return s.mtim
+}
+
+func (s Stat_t) GetLastModification() syscall.Timespec {
+	return s.Mtim()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 00000000..4b2198b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+	return &Stat_t{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*Stat_t, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 00000000..80262d95
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+	return &Stat_t{size: s.Size,
+		mode: s.Mode,
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: s.Rdev,
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.Stat_t from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+// +// Throws an error if the file does not exist +func Stat(path string) (*Stat_t, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go new file mode 100644 index 00000000..45341292 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go @@ -0,0 +1,37 @@ +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.Uid() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.Gid() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go new file mode 100644 index 00000000..5251ae21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.Stat_t type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { + return &Stat_t{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 00000000..b1fd39e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,36 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +type Stat_t struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +func (s Stat_t) Name() string { + return s.name +} + +func (s Stat_t) Size() int64 { + return s.size +} + +func (s Stat_t) Mode() os.FileMode { + return s.mode +} + +func (s Stat_t) ModTime() time.Time { + return s.modTime +} + +func (s Stat_t) IsDir() bool { + return s.isDir +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 00000000..fddbecd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 00000000..3be563f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + 
return 0, ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go new file mode 100644 index 00000000..4c6002fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go @@ -0,0 +1,11 @@ +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000..ceaa044c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 00000000..8f902982 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,28 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + AT_FDCWD := -100 + AT_SYMLINK_NOFOLLOW := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go new file mode 100644 index 00000000..350cce1e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go @@ -0,0 +1,66 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := 
os.Stat(file)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ts := []syscall.Timespec{{0, 0}, {0, 0}}
+	if err := LUtimesNano(symlink, ts); err != nil {
+		t.Fatal(err)
+	}
+
+	symlinkInfo, err := os.Lstat(symlink)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
+		t.Fatal("The modification time of the symlink should be different")
+	}
+
+	fileInfo, err := os.Stat(file)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+		t.Fatal("The modification time of the file should be the same")
+	}
+
+	if err := LUtimesNano(invalid, ts); err == nil {
+		t.Fatal("Doesn't return an error on a non-existing file")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 00000000..adf2734f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 00000000..00edb201
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,59 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Returns a nil slice and nil error if the xattr is not set
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return nil, err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return nil, err
+	}
+
+	dest := make([]byte, 128)
+	destBytes := unsafe.Pointer(&dest[0])
+	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	if errno == syscall.ENODATA {
+		return nil, nil
+	}
+	if errno == syscall.ERANGE {
+		dest = make([]byte, sz)
+		destBytes := unsafe.Pointer(&dest[0])
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	}
+	if errno != 0 {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+var _zero uintptr
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return err
+	}
+	var dataBytes unsafe.Pointer
+	if len(data) > 0 {
+		dataBytes = unsafe.Pointer(&data[0])
+	} else {
+		dataBytes = unsafe.Pointer(&_zero)
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 00000000..0060c167
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package system
+
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go
new file mode 100644
index 00000000..b42983e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go
@@ -0,0 +1,21 @@
+package tarsum
+
+// BuilderContext is an interface extending TarSum by adding the Remove method.
+// In general there was concern about adding this method to TarSum itself
+// so instead it is being added just to "BuilderContext" which will then
+// only be used during the .dockerignore file processing
+// - see builder/evaluator.go
+type BuilderContext interface {
+	TarSum
+	Remove(string)
+}
+
+func (bc *tarSum) Remove(filename string) {
+	for i, fis := range bc.sums {
+		if fis.Name() == filename {
+			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+			// Note, we don't just return because there could be
+			// more than one with this name
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go
new file mode 100644
index 00000000..719f7289
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go
@@ -0,0 +1,63 @@
+package tarsum
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// Trying to remove a tarsum (in the BuilderContext) that does not exist should not change anything
+func TestTarSumRemoveNonExistent(t *testing.T) {
+	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+	reader, err := os.Open(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ts, err := NewTarSum(reader, false, Version0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read and discard bytes so that it populates sums
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err != nil {
+		t.Errorf("failed to read from %s: %s", filename, err)
+	}
+
+	expected := len(ts.GetSums())
+
+	ts.(BuilderContext).Remove("")
+	ts.(BuilderContext).Remove("Anything")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, ts.GetSums())
+	}
+}
+
+// Remove a tarsum (in the BuilderContext)
+func TestTarSumRemove(t *testing.T) {
+	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+	reader, err := os.Open(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ts, err := NewTarSum(reader, false, Version0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read and discard bytes so that it populates sums
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err != nil {
+		t.Errorf("failed to read from %s: %s", filename, err)
+	}
+
+	expected := len(ts.GetSums()) - 1
+
+	ts.(BuilderContext).Remove("etc/sudoers")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go
new file mode 100644
index 00000000..7c2161c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go
b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through an interface +// so the actual name and sum cannot be meddled with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values in a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. 
+// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting the fileinfos by their original position in the tar archive +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 00000000..bb700d8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum.Sum()) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have returned nil if the name was not found.") + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 00000000..d2df58c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,294 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. 
+// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining deterministic accountability. This neither +// constrains the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor includes additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed-time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed-time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output is gzip compressed. 
+ tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = 
ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md new file mode 100644 index 00000000..51e95373 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,225 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining deterministic accountability. This neither +constrains the ordering or manipulation of the files during the creation or +unpacking of the archive, nor includes additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. 
+* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate needed differences in the +calculation, and the ability to maintain backward compatibility. + +The general algorithm is described further in the 'Calculation' section. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. + +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attributes (`xattrs`, also seen as `SCHILY.xattr.`-prefixed pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating placeholder for the next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for an alternate cipher could include future-proofing the TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the lifecycle of the tar archive, in that the tar archive is not an immutable, +permanent artifact. Otherwise, options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation, items such as the order of files in the tar archive and timestamps are +subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive has its contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. 
Additionally, the file +name and position in the archive are kept as a reference for special ordering. + +#### Headers + +The following headers are read, in this +order (with the corresponding representation of each value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) are included after the above list. These xattr key/value pairs are first +sorted by key. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the ordered headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this, the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgements + +Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the +TarSum calculation. 
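As a rough illustration of the checksum string format defined under 'Elements of TarSum checksum' above, the following is a minimal sketch (not part of this patch; `splitTarSum` is a hypothetical helper name) of splitting a TarSum checksum into its version label, cipher name, and hex digest, mirroring the '+' and ':' delimiters from the spec:

```go
package main

import (
	"fmt"
	"strings"
)

// splitTarSum breaks "{versionLabel}+{hashName}:{hexDigest}" into its
// three components, using the two delimiters defined by the spec.
func splitTarSum(checksum string) (version, cipher, digest string, err error) {
	mechanics := strings.SplitN(checksum, ":", 2)
	if len(mechanics) != 2 {
		return "", "", "", fmt.Errorf("missing ':' delimiter in %q", checksum)
	}
	label := strings.SplitN(mechanics[0], "+", 2)
	if len(label) != 2 {
		return "", "", "", fmt.Errorf("missing '+' delimiter in %q", checksum)
	}
	return label[0], label[1], mechanics[1], nil
}

func main() {
	v, c, d, err := splitTarSum("tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // tarsum.v1
	fmt.Println(c) // sha256
	fmt.Println(d) // 220a60ec...
}
```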
+ diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go new file mode 100644 index 00000000..89626660 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -0,0 +1,648 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. 
ensuring it has a different hash than the one above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar is a newer version of collision-0.tar, ensuring it has a different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar is a newer version of collision-1.tar, ensuring it has a different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in-memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. 
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarSumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. 
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the built-in read sizes: buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", 
+ Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 00000000..48e2af34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? 
zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 00000000..8988b9f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, 0, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the element at index 5). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go new file mode 100644 index 00000000..88e0a578 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet 
:= []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 00000000..9727ecde --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go new file mode 100644 index 00000000..1005084f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -0,0 +1,51 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include <termios.h> +import "C" + +// Termios is the Unix API for terminal I/O. +// It is a passthrough for syscall.Termios in order to make it portable to +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + newState.Oflag = newState.Oflag | C.OPOST + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go new file mode 100644 index 00000000..266039ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go @@ -0,0 +1,19 @@ +// +build !windows +// +build !linux !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 00000000..7912ae43 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,132 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "io" + "os" + "os/signal" + "syscall" + "unsafe" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // errno == 0 means success + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. 
+func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // errno == 0 means success + if err == 0 { + return nil + } + return err +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 00000000..da9295ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,205 @@ +// +build windows + +package term + +import ( + "fmt" + "io" + "os" + "os/signal" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + switch { + case os.Getenv("ConEmuANSI") == "ON": + // The ConEmu shell emulates ANSI well by default. + return os.Stdin, os.Stdout, os.Stderr + case os.Getenv("MSYSTEM") != "": + // MSYS (mingw) does not emulate ANSI well. + return windows.ConsoleStreams() + default: + return windows.ConsoleStreams() + } +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windows.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. 
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	winsize := &Winsize{
+		Width:  uint16(info.Window.Right - info.Window.Left + 1),
+		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+		x:      0,
+		y:      0}
+
+	// Note: GetWinsize is called frequently -- uncomment only for excessive details
+	// logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String())
+	// logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y)
+	return winsize, nil
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+
+	// Ensure the requested dimensions are no larger than the maximum window size
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return err
+	}
+
+	if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) {
+		return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allowed: (%v,%v)",
+			ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y)
+	}
+
+	// Narrow the sizes to that used by Windows
+	width := winterm.SHORT(ws.Width)
+	height := winterm.SHORT(ws.Height)
+
+	// Set the dimensions while ensuring they remain within the bounds of the backing console buffer
+	// -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs,
+	//    shift the upper left just enough to keep the new window within the buffer.
+	rect := info.Window
+	if width < rect.Right-rect.Left+1 {
+		rect.Right = rect.Left + width - 1
+	} else if width > rect.Right-rect.Left+1 {
+		rect.Right = rect.Left + width - 1
+		if rect.Right >= info.Size.X {
+			rect.Left = info.Size.X - width
+			rect.Right = info.Size.X - 1
+		}
+	}
+
+	if height < rect.Bottom-rect.Top+1 {
+		rect.Bottom = rect.Top + height - 1
+	} else if height > rect.Bottom-rect.Top+1 {
+		rect.Bottom = rect.Top + height - 1
+		if rect.Bottom >= info.Size.Y {
+			rect.Top = info.Size.Y - height
+			rect.Bottom = info.Size.Y - 1
+		}
+	}
+	logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect)
+
+	return winterm.SetConsoleWindowInfo(fd, true, rect)
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	return windows.IsConsole(fd)
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+	return winterm.SetConsoleMode(fd, state.mode)
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+	mode, e := winterm.GetConsoleMode(fd)
+	if e != nil {
+		return nil, e
+	}
+	return &State{mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + mode := state.mode + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go new file mode 100644 index 00000000..480db900 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]byte + Ispeed uint64 + Ospeed uint64 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go new file mode 100644 index 00000000..ed843ad6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 00000000..22921b6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,47 @@ +// +build !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TCGETS + setTermios = syscall.TCSETS +) + +// Termios is the Unix API for terminal I/O. 
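Each of these MakeRaw variants relies on Go's AND NOT operator: x &^= y clears in x every bit that is set in y, which is how the termios input, output, and local flags are stripped. A tiny illustration with made-up flag values (not the real termios bits):

package main

import "fmt"

const (
	echoFlag   = 0x0008 // illustrative values only
	icanonFlag = 0x0100
	otherFlag  = 0x0001
)

func main() {
	lflag := uint32(echoFlag | icanonFlag | otherFlag)
	lflag &^= (echoFlag | icanonFlag) // clear echo and canonical mode, keep the rest
	fmt.Printf("%#x\n", lflag)        // prints 0x1
}
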
+type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 00000000..53becb01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,256 @@ +// +build windows + +package windows + +import ( + "bytes" + "errors" + "fmt" + "os" + "strings" + "unsafe" + + . "github.com/Azure/go-ansiterm" + . "github.com/Azure/go-ansiterm/winterm" +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte + // TODO(azlinux): Remove this and hard-code the string -- it is not going to change + escapeSequence []byte +} + +func newAnsiReader(nFile int) *ansiReader { + file, fd := GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(KEY_ESC_CSI), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
+func (ar *ansiReader) Read(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	// Previously read bytes exist, read as much as we can and return
+	if len(ar.buffer) > 0 {
+		logger.Debugf("Reading previously cached bytes")
+
+		originalLength := len(ar.buffer)
+		copiedLength := copy(p, ar.buffer)
+
+		if copiedLength == originalLength {
+			ar.buffer = make([]byte, 0, len(p))
+		} else {
+			ar.buffer = ar.buffer[copiedLength:]
+		}
+
+		logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
+		return copiedLength, nil
+	}
+
+	// Read and translate key events
+	events, err := readInputEvents(ar.fd, len(p))
+	if err != nil {
+		return 0, err
+	} else if len(events) == 0 {
+		logger.Debug("No input events detected")
+		return 0, nil
+	}
+
+	keyBytes := translateKeyEvents(events, ar.escapeSequence)
+
+	// Save excess bytes and right-size keyBytes
+	if len(keyBytes) > len(p) {
+		logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
+		ar.buffer = keyBytes[len(p):]
+		keyBytes = keyBytes[:len(p)]
+	} else if len(keyBytes) == 0 {
+		logger.Debug("No key bytes returned from the translator")
+		return 0, nil
+	}
+
+	copiedLength := copy(p, keyBytes)
+	if copiedLength != len(keyBytes) {
+		return 0, errors.New("Unexpected copy length encountered.")
+	}
+
+	logger.Debugf("Read p[%d]: % x", copiedLength, p)
+	logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
+	return copiedLength, nil
+}
+
+// readInputEvents polls until at least one event is available.
+func readInputEvents(fd uintptr, maxBytes int) ([]INPUT_RECORD, error) {
+	// Determine the maximum number of records to retrieve
+	// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
+	//    unsafe.Sizeof requires an expression vs. a type-reference; the casting
+	//    tricks the type system into believing it has such an expression.
+	recordSize := int(unsafe.Sizeof(*((*INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+	countRecords := maxBytes / recordSize
+	if countRecords > MAX_INPUT_EVENTS {
+		countRecords = MAX_INPUT_EVENTS
+	}
+	logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+	// Wait for and read input events
+	events := make([]INPUT_RECORD, countRecords)
+	nEvents := uint32(0)
+	eventsExist, err := WaitForSingleObject(fd, WAIT_INFINITE)
+	if err != nil {
+		return nil, err
+	}
+
+	if eventsExist {
+		err = ReadConsoleInput(fd, events, &nEvents)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Return a slice restricted to the number of returned records
+	logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+	return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+var arrowKeyMapPrefix = map[WORD]string{
+	VK_UP:    "%s%sA",
+	VK_DOWN:  "%s%sB",
+	VK_RIGHT: "%s%sC",
+	VK_LEFT:  "%s%sD",
+}
+
+var keyMapPrefix = map[WORD]string{
+	VK_UP:     "\x1B[%sA",
+	VK_DOWN:   "\x1B[%sB",
+	VK_RIGHT:  "\x1B[%sC",
+	VK_LEFT:   "\x1B[%sD",
+	VK_HOME:   "\x1B[1%s~", // showkey shows ^[[1
+	VK_END:    "\x1B[4%s~", // showkey shows ^[[4
+	VK_INSERT: "\x1B[2%s~",
+	VK_DELETE: "\x1B[3%s~",
+	VK_PRIOR:  "\x1B[5%s~",
+	VK_NEXT:   "\x1B[6%s~",
+	VK_F1:     "",
+	VK_F2:     "",
+	VK_F3:     "\x1B[13%s~",
+	VK_F4:     "\x1B[14%s~",
+	VK_F5:     "\x1B[15%s~",
+	VK_F6:     "\x1B[17%s~",
+	VK_F7:     "\x1B[18%s~",
+	VK_F8:     "\x1B[19%s~",
+	VK_F9:     "\x1B[20%s~",
+	VK_F10:    "\x1B[21%s~",
+	VK_F11:    "\x1B[23%s~",
+	VK_F12:    "\x1B[24%s~",
+}
+
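The record-count arithmetic in readInputEvents above simply divides the byte budget by the size of one INPUT_RECORD, obtained via unsafe.Sizeof. The same idea with a hypothetical stand-in struct (field layout and sizes are illustrative):

package main

import (
	"fmt"
	"unsafe"
)

type myRecord struct { // stand-in for winterm's INPUT_RECORD
	eventType uint16
	data      [18]byte
}

func main() {
	maxBytes := 100
	recordSize := int(unsafe.Sizeof(myRecord{}))
	countRecords := maxBytes / recordSize // how many whole records fit in the buffer
	fmt.Println(recordSize, countRecords) // 20 5 on most platforms
}
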
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+func translateKeyEvents(events []INPUT_RECORD, escapeSequence []byte) []byte {
+	var buffer bytes.Buffer
+	for _, event := range events {
+		if event.EventType == KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+			buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+		}
+	}
+
+	return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string {
+	if keyEvent.UnicodeChar == 0 {
+		return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-S.
+		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+		// <Ctrl>-E  Quits current command and creates a core
+
+	}
+
+	// <Alt>+Key generates ESC N Key
+	if !control && alt {
+		return KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+
+	return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key WORD, controlState DWORD, escapeSequence []byte) string {
+	shift, alt, control := getControlKeys(controlState)
+	modifier := getControlKeysModifier(shift, alt, control, false)
+
+	if format, ok := arrowKeyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, escapeSequence, modifier)
+	}
+
+	if format, ok := keyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, modifier)
+	}
+
+	return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState DWORD) (shift, alt, control bool) {
+	shift = 0 != (controlState & SHIFT_PRESSED)
+	alt = 0 != (controlState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED))
+	control = 0 != (controlState & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED))
+	return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control, meta bool) string {
+	if shift && alt && control {
+		return KEY_CONTROL_PARAM_8
+	}
+	if alt && control {
+		return KEY_CONTROL_PARAM_7
+	}
+	if shift && control {
+		return KEY_CONTROL_PARAM_6
+	}
+	if control {
+		return KEY_CONTROL_PARAM_5
+	}
+	if shift && alt {
+		return KEY_CONTROL_PARAM_4
+	}
+	if alt {
+		return KEY_CONTROL_PARAM_3
+	}
+	if shift {
+		return KEY_CONTROL_PARAM_2
+	}
+	return ""
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go
new file mode 100644
index 00000000..a22d47fe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go
@@ -0,0 +1,76 @@
+// +build windows
+
+package windows
+
+import (
+	"io/ioutil"
+	"os"
+
+	. "github.com/Azure/go-ansiterm"
+	. "github.com/Azure/go-ansiterm/winterm"
+	"github.com/Sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
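A worked example of the formatting path just shown, using a literal stand-in for go-ansiterm's KEY_ESC_CSI constant (assumed here to be ESC plus '['): an unmodified Up arrow comes out as the standard ANSI cursor-up sequence.

package main

import "fmt"

func main() {
	escapeSequence := "\x1b[" // stand-in for KEY_ESC_CSI
	modifier := ""            // getControlKeysModifier result when nothing is held
	up := fmt.Sprintf("%s%sA", escapeSequence, modifier) // arrowKeyMapPrefix[VK_UP]
	fmt.Printf("%q\n", up)                               // "\x1b[A"
}
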
+type ansiWriter struct { + file *os.File + fd uintptr + infoReset *CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *AnsiParser +} + +func newAnsiWriter(nFile int) *ansiWriter { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + + file, fd := GetStdFile(nFile) + info, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := CreateParser("Ground", CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go new file mode 100644 index 00000000..ecd1c592 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go @@ -0,0 +1,61 @@ +// +build windows + +package windows + +import ( + "io" + "os" + "syscall" + + . "github.com/Azure/go-ansiterm/winterm" +) + +// ConsoleStreams, for each standard stream referencing a console, returns a wrapped version +// that handles ANSI character sequences. +func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + if IsConsole(os.Stdin.Fd()) { + stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if IsConsole(os.Stdout.Fd()) { + stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if IsConsole(os.Stderr.Fd()) { + stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return stdIn, stdOut, stdErr +} + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := GetConsoleMode(fd) + return e == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 00000000..bf4c7b50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,5 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. 
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windows
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows_test.go
new file mode 100644
index 00000000..52aeab54
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows_test.go
@@ -0,0 +1,3 @@
+// This file is necessary to pass the Docker tests.
+
+package windows
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go
new file mode 100644
index 00000000..ea19daad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go
@@ -0,0 +1,27 @@
+// Package timeutils provides helper functions to parse and print time (time.Time).
+package timeutils
+
+import (
+	"errors"
+	"time"
+)
+
+const (
+	// RFC3339NanoFixed is our own version of RFC3339Nano because we want one
+	// that pads the nanoseconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// JSONFormat is the format used by FastMarshalJSON
+	JSONFormat = `"` + time.RFC3339Nano + `"`
+)
+
+// FastMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON is making.
+func FastMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(JSONFormat), nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json_test.go
new file mode 100644
index 00000000..1ff33317
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json_test.go
@@ -0,0 +1,47 @@
+package timeutils
+
+import (
+	"testing"
+	"time"
+)
+
+// Testing to ensure the 'year' field is between 0 and 9999
+func TestFastMarshalJSONWithInvalidDate(t *testing.T) {
+	aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local)
+	json, err := FastMarshalJSON(aTime)
+	if err == nil {
+		t.Fatalf("FastMarshalJSON should throw an error, but was '%v'", json)
+	}
+	anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local)
+	json, err = FastMarshalJSON(anotherTime)
+	if err == nil {
+		t.Fatalf("FastMarshalJSON should throw an error, but was '%v'", json)
+	}
+
+}
+
+func TestFastMarshalJSON(t *testing.T) {
+	aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC)
+	json, err := FastMarshalJSON(aTime)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := "\"2015-05-29T11:01:02.000000003Z\""
+	if json != expected {
+		t.Fatalf("Expected %v, got %v", expected, json)
+	}
+
+	location, err := time.LoadLocation("Europe/Paris")
+	if err != nil {
+		t.Fatal(err)
+	}
+	aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location)
+	json, err = FastMarshalJSON(aTime)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected = "\"2015-05-29T11:01:02.000000003+02:00\""
+	if json != expected {
+		t.Fatalf("Expected %v, got %v", expected, json)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils.go
new file mode
100644 index 00000000..8437f124 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils.go @@ -0,0 +1,36 @@ +package timeutils + +import ( + "strconv" + "strings" + "time" +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. +func GetTimestamp(value string, reference time.Time) string { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10) + } + + var format string + if strings.Contains(value, ".") { + format = time.RFC3339Nano + } else { + format = time.RFC3339 + } + + loc := time.FixedZone(time.Now().Zone()) + if len(value) < len(format) { + format = format[:len(value)] + } + t, err := time.ParseInLocation(format, value, loc) + if err != nil { + return value + } + return strconv.FormatInt(t.Unix(), 10) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils_test.go new file mode 100644 index 00000000..f71dcb53 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils_test.go @@ -0,0 +1,44 @@ +package timeutils + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now() + cases := []struct{ in, expected string }{ + {"0", "-62167305600"}, // 0 gets parsed year 0 + + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045"}, + {"2006-01-02T15:04:05.999999999Z", "1136214245"}, + {"2006-01-02T15:04:05.999999999", "1136214245"}, + {"2006-01-02T15:04:05", "1136214245"}, + {"2006-01-02T15:04", "1136214240"}, + {"2006-01-02T15", "1136214000"}, + {"2006-01-02T", "1136160000"}, + {"2006-01-02", "1136160000"}, + {"2006", "1136073600"}, + {"2015-05-13T20:39:09Z", "1431549549"}, + + // unix timestamps returned as is + {"1136073600", "1136073600"}, + + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix())}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix())}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix())}, + + // String fallback + {"invalid", "invalid"}, + } + + for _, c := range cases { + o := GetTimestamp(c.in, now) + if o != c.expected { + t.Fatalf("wrong value for '%s'. expected:'%s' got:'%s'", c.in, c.expected, o) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tlsconfig/config.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tlsconfig/config.go new file mode 100644 index 00000000..88f768ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tlsconfig/config.go @@ -0,0 +1,132 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it. 
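Before the implementation, a short sketch of how the two entry points defined below are typically consumed; the certificate paths are hypothetical:

package main

import (
	"crypto/tls"
	"log"

	"github.com/docker/docker/pkg/tlsconfig"
)

func main() {
	opts := tlsconfig.Options{
		CAFile:     "/etc/docker/ca.pem", // hypothetical paths
		CertFile:   "/etc/docker/cert.pem",
		KeyFile:    "/etc/docker/key.pem",
		ClientAuth: tls.RequireAndVerifyClientCert, // server-side option only
	}

	serverCfg, err := tlsconfig.Server(opts)
	if err != nil {
		log.Fatal(err)
	}
	clientCfg, err := tlsconfig.Client(opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = serverCfg // e.g. http.Server{TLSConfig: serverCfg}
	_ = clientCfg // e.g. http.Transport{TLSClientConfig: clientCfg}
}
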
+package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// Options represents the information needed to create client and server TLS configurations. +type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. + CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType +} + +// Extra (server-side) accepted CBC cipher suites - will phase out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, +} + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} + +// For use by code which already has a crypto/tls options struct but wants to +// use a commonly accepted set of TLS cipher suites, with known weak algorithms removed +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. +var ServerDefault = tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, +} + +// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. +var ClientDefault = tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file. +func certPool(caFile string) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + certPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Could not read CA certificate %s: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %s", caFile) + } + s := certPool.Subjects() + subjects := make([]string, len(s)) + for i, subject := range s { + subjects[i] = string(subject) + } + logrus.Debugf("Trusting certs with subjects: %v", subjects) + return certPool, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify { + CAs, err := certPool(options.CAFile) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + if options.CertFile != "" && options.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + } + + return &tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. 
+func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven { + CAs, err := certPool(options.CAFile) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + return &tlsConfig, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go index eb2ae4e8..8fb0d804 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go @@ -1,3 +1,5 @@ +// Package ulimit provides structure and helper function to parse and represent +// resource limits (Rlimit and Ulimit, its human friendly version). package ulimit import ( @@ -6,13 +8,14 @@ import ( "strings" ) -// Human friendly version of Rlimit +// Ulimit is a human friendly version of Rlimit. type Ulimit struct { Name string Hard int64 Soft int64 } +// Rlimit specifies the resource limits, such as max open files. type Rlimit struct { Type int `json:"type,omitempty"` Hard uint64 `json:"hard,omitempty"` @@ -24,43 +27,44 @@ const ( // some of these are defined in the syscall package, but not all. // Also since Windows client doesn't get access to the syscall package, need to // define these here - RLIMIT_AS = 9 - RLIMIT_CORE = 4 - RLIMIT_CPU = 0 - RLIMIT_DATA = 2 - RLIMIT_FSIZE = 1 - RLIMIT_LOCKS = 10 - RLIMIT_MEMLOCK = 8 - RLIMIT_MSGQUEUE = 12 - RLIMIT_NICE = 13 - RLIMIT_NOFILE = 7 - RLIMIT_NPROC = 6 - RLIMIT_RSS = 5 - RLIMIT_RTPRIO = 14 - RLIMIT_RTTIME = 15 - RLIMIT_SIGPENDING = 11 - RLIMIT_STACK = 3 + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 ) var ulimitNameMapping = map[string]int{ - //"as": RLIMIT_AS, // Disbaled since this doesn't seem usable with the way Docker inits a container. - "core": RLIMIT_CORE, - "cpu": RLIMIT_CPU, - "data": RLIMIT_DATA, - "fsize": RLIMIT_FSIZE, - "locks": RLIMIT_LOCKS, - "memlock": RLIMIT_MEMLOCK, - "msgqueue": RLIMIT_MSGQUEUE, - "nice": RLIMIT_NICE, - "nofile": RLIMIT_NOFILE, - "nproc": RLIMIT_NPROC, - "rss": RLIMIT_RSS, - "rtprio": RLIMIT_RTPRIO, - "rttime": RLIMIT_RTTIME, - "sigpending": RLIMIT_SIGPENDING, - "stack": RLIMIT_STACK, + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, } +// Parse parses and returns a Ulimit from the specified string. 
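A usage sketch for the two ulimit helpers touched in this hunk, assuming the vendored import path: Parse accepts the CLI form name=soft[:hard], and GetRlimit maps the name onto the numeric rlimit type listed above.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ulimit"
)

func main() {
	u, err := ulimit.Parse("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s soft=%d hard=%d\n", u.Name, u.Soft, u.Hard) // nofile soft=1024 hard=2048

	r, err := u.GetRlimit()
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Type) // 7, i.e. rlimitNofile
}
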
func Parse(val string) (*Ulimit, error) { parts := strings.SplitN(val, "=", 2) if len(parts) != 2 { @@ -92,6 +96,7 @@ func Parse(val string) (*Ulimit, error) { return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil } +// GetRlimit returns the RLimit corresponding to Ulimit. func (u *Ulimit) GetRlimit() (*Rlimit, error) { t, exists := ulimitNameMapping[u.Name] if !exists { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go index 44012aaf..c219a8a9 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go @@ -1,3 +1,5 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. package units import ( @@ -6,7 +8,7 @@ import ( ) // HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.) +// (eg. "About a minute", "4 hours ago", etc.). func HumanDuration(d time.Duration) string { if seconds := int(d.Seconds()); seconds < 1 { return "Less than a second" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go index d7850ad0..2fde3b41 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go @@ -38,7 +38,7 @@ var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} // CustomSize returns a human-readable approximation of a size -// using custom format +// using custom format. func CustomSize(format string, size float64, base float64, _map []string) string { i := 0 for size >= base { @@ -49,17 +49,19 @@ func CustomSize(format string, size float64, base float64, _map []string) string } // HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB") +// using SI standard (eg. "44kB", "17MB"). func HumanSize(size float64) string { - return CustomSize("%.4g %s", float64(size), 1000.0, decimapAbbrs) + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) } +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). func BytesSize(size float64) string { return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) } // FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB") +// size using SI standard (eg. "44kB", "17MB"). func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } @@ -72,7 +74,7 @@ func RAMInBytes(size string) (int64, error) { return parseSize(size, binaryMap) } -// Parses the human-readable size string into the amount it represents +// Parses the human-readable size string into the amount it represents. 
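The size helpers in this hunk are near-inverses of one another: HumanSize prints SI (base-1000) units, BytesSize prints binary (base-1024) units, and FromHumanSize/RAMInBytes parse such strings back into byte counts. For example:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/units"
)

func main() {
	fmt.Println(units.HumanSize(44000)) // "44 kB"     (base 1000)
	fmt.Println(units.BytesSize(44000)) // "42.97 KiB" (base 1024)

	n, err := units.RAMInBytes("17MiB")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 17825792
}
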
 func parseSize(sizeStr string, uMap unitMap) (int64, error) {
 	matches := sizeRegex.FindStringSubmatch(sizeStr)
 	if len(matches) != 3 {
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil.go
new file mode 100644
index 00000000..f7094b1f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil.go
@@ -0,0 +1,50 @@
+// Package urlutil provides helper functions to check the kind of a URL.
+// It supports HTTP URLs, git URLs and transport URLs (tcp://, …)
+package urlutil
+
+import (
+	"regexp"
+	"strings"
+)
+
+var (
+	validPrefixes = map[string][]string{
+		"url":       {"http://", "https://"},
+		"git":       {"git://", "github.com/", "git@"},
+		"transport": {"tcp://", "udp://", "unix://"},
+	}
+	urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
+)
+
+// IsURL returns true if the provided str is an HTTP(S) URL.
+func IsURL(str string) bool {
+	return checkURL(str, "url")
+}
+
+// IsGitURL returns true if the provided str is a git repository URL.
+func IsGitURL(str string) bool {
+	if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
+		return true
+	}
+	return checkURL(str, "git")
+}
+
+// IsGitTransport returns true if the provided str is a git transport by inspecting
+// the prefix of the string for known protocols used in git.
+func IsGitTransport(str string) bool {
+	return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
+}
+
+// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL.
+func IsTransportURL(str string) bool {
+	return checkURL(str, "transport")
+}
+
+func checkURL(str, kind string) bool {
+	for _, prefix := range validPrefixes[kind] {
+		if strings.HasPrefix(str, prefix) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go
new file mode 100644
index 00000000..bb89d8b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go
@@ -0,0 +1,55 @@
+package urlutil
+
+import "testing"
+
+var (
+	gitUrls = []string{
+		"git://github.com/docker/docker",
+		"git@github.com:docker/docker.git",
+		"git@bitbucket.org:atlassianlabs/atlassian-docker.git",
+		"https://github.com/docker/docker.git",
+		"http://github.com/docker/docker.git",
+		"http://github.com/docker/docker.git#branch",
+		"http://github.com/docker/docker.git#:dir",
+	}
+	incompleteGitUrls = []string{
+		"github.com/docker/docker",
+	}
+	invalidGitUrls = []string{
+		"http://github.com/docker/docker.git:#branch",
+	}
+)
+
+func TestValidGitTransport(t *testing.T) {
+	for _, url := range gitUrls {
+		if IsGitTransport(url) == false {
+			t.Fatalf("%q should be detected as valid Git prefix", url)
+		}
+	}
+
+	for _, url := range incompleteGitUrls {
+		if IsGitTransport(url) == true {
+			t.Fatalf("%q should not be detected as valid Git prefix", url)
+		}
+	}
+}
+
+func TestIsGIT(t *testing.T) {
+	for _, url := range gitUrls {
+		if IsGitURL(url) == false {
+			t.Fatalf("%q should be detected as valid Git url", url)
+		}
+	}
+
+	for _, url := range incompleteGitUrls {
+		if IsGitURL(url) == false {
+			t.Fatalf("%q should be detected as valid Git url", url)
+		}
+	}
+
+	for _, url := range invalidGitUrls {
+		if IsGitURL(url) == true {
+			t.Fatalf("%q should not be detected as valid Git prefix", url)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md
new file mode 100644
index 00000000..d9cb367d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md
@@ -0,0 +1 @@
+This package provides helper functions to pack version information into a single User-Agent header.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go
new file mode 100644
index 00000000..a4109748
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go
@@ -0,0 +1,55 @@
+// Package useragent provides helper functions to pack
+// version information into a single User-Agent header.
package useragent
+
+import (
+	"strings"
+)
+
+// VersionInfo is used to model UserAgent versions.
+type VersionInfo struct {
+	Name    string
+	Version string
+}
+
+func (vi *VersionInfo) isValid() bool {
+	const stopChars = " \t\r\n/"
+	name := vi.Name
+	vers := vi.Version
+	if len(name) == 0 || strings.ContainsAny(name, stopChars) {
+		return false
+	}
+	if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
+		return false
+	}
+	return true
+}
+
+// AppendVersions converts versions to a string and appends the string to the string base.
+//
+// Each VersionInfo will be converted to a string in the format of
+// "product/version", where the "product" is taken from the name field and the
+// version from the version field. Several pieces of version information
+// will be concatenated and separated by spaces.
+//
+// Example:
+// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
+// results in "base foo/1.0 bar/2.0".
+func AppendVersions(base string, versions ...VersionInfo) string {
+	if len(versions) == 0 {
+		return base
+	}
+
+	verstrs := make([]string, 0, 1+len(versions))
+	if len(base) > 0 {
+		verstrs = append(verstrs, base)
+	}
+
+	for _, v := range versions {
+		if !v.isValid() {
+			continue
+		}
+		verstrs = append(verstrs, v.Name+"/"+v.Version)
+	}
+	return strings.Join(verstrs, " ")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent_test.go
new file mode 100644
index 00000000..0ad7243a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent_test.go
@@ -0,0 +1,31 @@
+package useragent
+
+import "testing"
+
+func TestVersionInfo(t *testing.T) {
+	vi := VersionInfo{"foo", "bar"}
+	if !vi.isValid() {
+		t.Fatalf("VersionInfo should be valid")
+	}
+	vi = VersionInfo{"", "bar"}
+	if vi.isValid() {
+		t.Fatalf("Expected VersionInfo to be invalid")
+	}
+	vi = VersionInfo{"foo", ""}
+	if vi.isValid() {
+		t.Fatalf("Expected VersionInfo to be invalid")
+	}
+}
+
+func TestAppendVersions(t *testing.T) {
+	vis := []VersionInfo{
+		{"foo", "1.0"},
+		{"bar", "0.1"},
+		{"pi", "3.1.4"},
+	}
+	v := AppendVersions("base", vis...)
+ expect := "base foo/1.0 bar/0.1 pi/3.1.4" + if v != expect { + t.Fatalf("expected %q, got %q", expect, v) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go new file mode 100644 index 00000000..bd5ec7a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go @@ -0,0 +1,63 @@ +package version + +import ( + "strconv" + "strings" +) + +// Version provides utility methods for comparing versions. +type Version string + +func (v Version) compareTo(other Version) int { + var ( + currTab = strings.Split(string(v), ".") + otherTab = strings.Split(string(other), ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 +} + +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go new file mode 100644 index 00000000..c02ec40f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go @@ -0,0 +1,27 @@ +package version + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := Version(a).compareTo(Version(b)); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) + +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go b/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go new file mode 100644 index 00000000..57560935 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go @@ -0,0 +1,254 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" +) + +// Login tries to register/login to the registry server. 
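Stepping back to pkg/version for a moment: compareTo is purely numeric and segment-wise, which is why "1.0.0" equals "1" and "1.12" ranks above "1.9". A quick sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/version"
)

func main() {
	fmt.Println(version.Version("1.12").GreaterThan("1.9")) // true: 12 > 9 numerically, not lexically
	fmt.Println(version.Version("1.0.0").Equal("1"))        // true: missing segments count as 0
}
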
+func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) { + // Separates the v2 registry login logic from the v1 logic. + if registryEndpoint.Version == APIVersion2 { + return loginV2(authConfig, registryEndpoint, "" /* scope */) + } + return loginV1(authConfig, registryEndpoint) +} + +// loginV1 tries to register/login to the v1 registry server. +func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) { + var ( + status string + reqBody []byte + err error + reqStatusCode = 0 + serverAddress = authConfig.ServerAddress + ) + + logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) + + if serverAddress == "" { + return "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + // to avoid sending the server address to the server it should be removed before being marshalled + authCopy := *authConfig + authCopy.ServerAddress = "" + + jsonBody, err := json.Marshal(authCopy) + if err != nil { + return "", fmt.Errorf("Config Error: %s", err) + } + + // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. + b := strings.NewReader(string(jsonBody)) + req1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b) + if err != nil { + return "", fmt.Errorf("Server Error: %s", err) + } + reqStatusCode = req1.StatusCode + defer req1.Body.Close() + reqBody, err = ioutil.ReadAll(req1.Body) + if err != nil { + return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) + } + + if reqStatusCode == 201 { + if loginAgainstOfficialIndex { + status = "Account created. Please use the confirmation link we sent" + + " to your e-mail to activate it." + } else { + // *TODO: Use registry configuration to determine what this says, if anything? + status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." + } + } else if reqStatusCode == 400 { + if string(reqBody) == "\"Username or email already exists\"" { + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + return "Login Succeeded", nil + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == 403 { + if loginAgainstOfficialIndex { + return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == 500 { // Issue #14326 + logrus.Errorf("%s returned status code %d. 
Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", fmt.Errorf("Internal Server Error") + } + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) + } + return "", fmt.Errorf("Registration: %s", reqBody) + + } else if reqStatusCode == 401 { + // This case would happen with private registries where /v1/users is + // protected, so people can use `docker login` as an auth check. + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + return "Login Succeeded", nil + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else { + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) + } + } else { + return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) + } + return status, nil +} + +// loginV2 tries to login to the v2 registry server. The given registry endpoint has been +// pinged or setup with a list of authorization challenges. Each of these challenges are +// tried until one of them succeeds. Currently supported challenge schemes are: +// HTTP Basic Authorization +// Token Authorization with a separate token issuing server +// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For +// now, users should create their account through other means like directly from a web page +// served by the v2 registry service provider. Whether this will be supported in the future +// is to be determined. +func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) { + logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) + var ( + err error + allErrors []error + ) + + for _, challenge := range registryEndpoint.AuthChallenges { + params := make(map[string]string, len(challenge.Parameters)+1) + for k, v := range challenge.Parameters { + params[k] = v + } + params["scope"] = scope + logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params) + + switch strings.ToLower(challenge.Scheme) { + case "basic": + err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint) + case "bearer": + err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint) + default: + // Unsupported challenge types are explicitly skipped. 
+ err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme) + } + + if err == nil { + return "Login Succeeded", nil + } + + logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err) + + allErrors = append(allErrors, err) + } + + return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) +} + +func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { + req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) + if err != nil { + return err + } + + req.SetBasicAuth(authConfig.Username, authConfig.Password) + + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return nil +} + +func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { + token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint) + if err != nil { + return err + } + + req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) + if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return nil +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(config *cliconfig.ConfigFile, index *IndexInfo) cliconfig.AuthConfig { + configKey := index.GetAuthConfigKey() + // First try the happy case + if c, found := config.AuthConfigs[configKey]; found || index.Official { + return c + } + + convertToHostname := func(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + stripped = strings.Replace(url, "https://", "", 1) + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range config.AuthConfigs { + if configKey == convertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return cliconfig.AuthConfig{} +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go b/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go new file mode 100644 index 00000000..a8e3da01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go @@ -0,0 +1,173 @@ +package registry + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/cliconfig" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &cliconfig.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} + authStr := cliconfig.EncodeAuth(newAuthConfig) + decAuthConfig := &cliconfig.AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = cliconfig.DecodeAuth(authStr) + if err != 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go b/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go
new file mode 100644
index 00000000..a8e3da01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go
@@ -0,0 +1,173 @@
+package registry
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/cliconfig"
+)
+
+func TestEncodeAuth(t *testing.T) {
+	newAuthConfig := &cliconfig.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
+	authStr := cliconfig.EncodeAuth(newAuthConfig)
+	decAuthConfig := &cliconfig.AuthConfig{}
+	var err error
+	decAuthConfig.Username, decAuthConfig.Password, err = cliconfig.DecodeAuth(authStr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if newAuthConfig.Username != decAuthConfig.Username {
+		t.Fatal("Encode Username doesn't match decoded Username")
+	}
+	if newAuthConfig.Password != decAuthConfig.Password {
+		t.Fatal("Encode Password doesn't match decoded Password")
+	}
+	if authStr != "a2VuOnRlc3Q=" {
+		t.Fatal("AuthString encoding isn't correct.")
+	}
+}
+
+func setupTempConfigFile() (*cliconfig.ConfigFile, error) {
+	root, err := ioutil.TempDir("", "docker-test-auth")
+	if err != nil {
+		return nil, err
+	}
+	root = filepath.Join(root, cliconfig.ConfigFileName)
+	configFile := cliconfig.NewConfigFile(root)
+
+	for _, registry := range []string{"testIndex", IndexServer} {
+		configFile.AuthConfigs[registry] = cliconfig.AuthConfig{
+			Username: "docker-user",
+			Password: "docker-pass",
+			Email:    "docker@docker.io",
+		}
+	}
+
+	return configFile, nil
+}
+
+func TestSameAuthDataPostSave(t *testing.T) {
+	configFile, err := setupTempConfigFile()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(configFile.Filename())
+
+	err = configFile.Save()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	authConfig := configFile.AuthConfigs["testIndex"]
+	if authConfig.Username != "docker-user" {
+		t.Fail()
+	}
+	if authConfig.Password != "docker-pass" {
+		t.Fail()
+	}
+	if authConfig.Email != "docker@docker.io" {
+		t.Fail()
+	}
+	if authConfig.Auth != "" {
+		t.Fail()
+	}
+}
+
+func TestResolveAuthConfigIndexServer(t *testing.T) {
+	configFile, err := setupTempConfigFile()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(configFile.Filename())
+
+	indexConfig := configFile.AuthConfigs[IndexServer]
+
+	officialIndex := &IndexInfo{
+		Official: true,
+	}
+	privateIndex := &IndexInfo{
+		Official: false,
+	}
+
+	resolved := ResolveAuthConfig(configFile, officialIndex)
+	assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer")
+
+	resolved = ResolveAuthConfig(configFile, privateIndex)
+	assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer")
+}
+
+func TestResolveAuthConfigFullURL(t *testing.T) {
+	configFile, err := setupTempConfigFile()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(configFile.Filename())
+
+	registryAuth := cliconfig.AuthConfig{
+		Username: "foo-user",
+		Password: "foo-pass",
+		Email:    "foo@example.com",
+	}
+	localAuth := cliconfig.AuthConfig{
+		Username: "bar-user",
+		Password: "bar-pass",
+		Email:    "bar@example.com",
+	}
+	officialAuth := cliconfig.AuthConfig{
+		Username: "baz-user",
+		Password: "baz-pass",
+		Email:    "baz@example.com",
+	}
+	configFile.AuthConfigs[IndexServer] = officialAuth
+
+	expectedAuths := map[string]cliconfig.AuthConfig{
+		"registry.example.com": registryAuth,
+		"localhost:8000":       localAuth,
+		"registry.com":         localAuth,
+	}
+
+	validRegistries := map[string][]string{
+		"registry.example.com": {
+			"https://registry.example.com/v1/",
+			"http://registry.example.com/v1/",
+			"registry.example.com",
+			"registry.example.com/v1/",
+		},
+		"localhost:8000": {
+			"https://localhost:8000/v1/",
+			"http://localhost:8000/v1/",
+			"localhost:8000",
+			"localhost:8000/v1/",
+		},
+		"registry.com": {
+			"https://registry.com/v1/",
+			"http://registry.com/v1/",
+			"registry.com",
+			"registry.com/v1/",
+		},
+	}
+
+	for configKey, registries := range validRegistries {
+		configured, ok := expectedAuths[configKey]
+		if !ok || configured.Email == "" {
+			t.Fail()
+		}
+		index := &IndexInfo{
+			Name: configKey,
+		}
+		for _, registry := range registries {
+			configFile.AuthConfigs[registry] = configured
+			resolved := ResolveAuthConfig(configFile, index)
+			if resolved.Email != configured.Email {
+				t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email)
+			}
+			delete(configFile.AuthConfigs, registry)
+			resolved = ResolveAuthConfig(configFile, index)
+			if resolved.Email == configured.Email {
+				t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email)
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go b/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go
new file mode 100644
index 00000000..e300d82a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go
@@ -0,0 +1,150 @@
+package registry
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+type octetType byte
+
+// AuthorizationChallenge carries information
+// from a WWW-Authenticate response header.
+type AuthorizationChallenge struct {
+	Scheme     string
+	Parameters map[string]string
+}
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []*AuthorizationChallenge {
+	var challenges []*AuthorizationChallenge
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
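For reference, parseAuthHeader lowercases the challenge scheme and collects the comma-separated parameters into a map. A small sketch of how pingV2 (later in this patch) consumes it, within this package; the header value is illustrative:

	hdr := http.Header{}
	hdr.Add("WWW-Authenticate", `Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`)
	for _, challenge := range parseAuthHeader(hdr) {
		// Prints: bearer https://auth.docker.io/token registry.docker.io
		fmt.Println(challenge.Scheme, challenge.Parameters["realm"], challenge.Parameters["service"])
	}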
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config.go b/Godeps/_workspace/src/github.com/docker/docker/registry/config.go
new file mode 100644
index 00000000..95f73129
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/config.go
@@ -0,0 +1,372 @@
+package registry
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// Options holds command line options.
+type Options struct {
+	Mirrors            opts.ListOpts
+	InsecureRegistries opts.ListOpts
+}
+
+const (
+	// DefaultNamespace is the default namespace
+	DefaultNamespace = "docker.io"
+	// DefaultV2Registry is the URI of the default v2 registry
+	DefaultV2Registry = "https://registry-1.docker.io"
+	// DefaultRegistryVersionHeader is the name of the default HTTP header
+	// that carries Registry version info
+	DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version"
+	// DefaultV1Registry is the URI of the default v1 registry
+	DefaultV1Registry = "https://index.docker.io"
+
+	// CertsDir is the directory where certificates are stored
+	CertsDir = "/etc/docker/certs.d"
+
+	// IndexServer is the v1 registry server used for user auth + account creation
+	IndexServer = DefaultV1Registry + "/v1/"
+	// IndexName is the name of the index
+	IndexName = "docker.io"
+	// NotaryServer is the endpoint serving the Notary trust server
+	NotaryServer = "https://notary.docker.io"
+)
+
+var (
+	// ErrInvalidRepositoryName is an error returned if the repository name did
+	// not have the correct form
+	ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
+
+	emptyServiceConfig = NewServiceConfig(nil)
+)
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) {
+	options.Mirrors = opts.NewListOpts(ValidateMirror)
+	cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror"))
+	options.InsecureRegistries = opts.NewListOpts(ValidateIndexName)
+	cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication"))
+}
+
+type netIPNet net.IPNet
+
+func (ipnet *netIPNet) MarshalJSON() ([]byte, error) {
+	return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) {
+	var ipnetStr string
+	if err = json.Unmarshal(b, &ipnetStr); err == nil {
+		var cidr *net.IPNet
+		if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+			*ipnet = netIPNet(*cidr)
+		}
+	}
+	return
+}
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+	InsecureRegistryCIDRs []*netIPNet           `json:"InsecureRegistryCIDRs"`
+	IndexConfigs          map[string]*IndexInfo `json:"IndexConfigs"`
+	Mirrors               []string
+}
+
+// NewServiceConfig returns a new instance of ServiceConfig
+func NewServiceConfig(options *Options) *ServiceConfig {
+	if options == nil {
+		options = &Options{
+			Mirrors:            opts.NewListOpts(nil),
+			InsecureRegistries: opts.NewListOpts(nil),
+		}
+	}
+
+	// Localhost is by default considered as an insecure registry
+	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
+	//
+	// TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
+	// daemon flags on boot2docker?
+	options.InsecureRegistries.Set("127.0.0.0/8")
+
+	config := &ServiceConfig{
+		InsecureRegistryCIDRs: make([]*netIPNet, 0),
+		IndexConfigs:          make(map[string]*IndexInfo, 0),
+		// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
+		// and Mirrors are only for the official registry anyways.
+		Mirrors: options.Mirrors.GetAll(),
+	}
+	// Split --insecure-registry into CIDR and registry-specific settings.
+	for _, r := range options.InsecureRegistries.GetAll() {
+		// Check if CIDR was passed to --insecure-registry
+		_, ipnet, err := net.ParseCIDR(r)
+		if err == nil {
+			// Valid CIDR.
+			config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*netIPNet)(ipnet))
+		} else {
+			// Assume `host:port` if not CIDR.
+			config.IndexConfigs[r] = &IndexInfo{
+				Name:     r,
+				Mirrors:  make([]string, 0),
+				Secure:   false,
+				Official: false,
+			}
+		}
+	}
+
+	// Configure public registry.
+	config.IndexConfigs[IndexName] = &IndexInfo{
+		Name:     IndexName,
+		Mirrors:  config.Mirrors,
+		Secure:   true,
+		Official: true,
+	}
+
+	return config
+}
+
+// isSecureIndex returns false if the provided indexName is part of the list of insecure registries
+// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs.
+//
+// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet.
+// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered
+// insecure.
+//
+// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
+// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained
+// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element
+// of insecureRegistries.
+func (config *ServiceConfig) isSecureIndex(indexName string) bool {
+	// Check for configured index, first. This is needed in case isSecureIndex
+	// is called from anything besides NewIndexInfo, in order to honor per-index configurations.
+	if index, ok := config.IndexConfigs[indexName]; ok {
+		return index.Secure
+	}
+
+	host, _, err := net.SplitHostPort(indexName)
+	if err != nil {
+		// assume indexName is of the form `host` without the port and go on.
+		host = indexName
+	}
+
+	addrs, err := lookupIP(host)
+	if err != nil {
+		ip := net.ParseIP(host)
+		if ip != nil {
+			addrs = []net.IP{ip}
+		}
+
+		// if ip == nil, then `host` is neither an IP nor it could be looked up,
+		// either because the index is unreachable, or because the index is behind an HTTP proxy.
+		// So, len(addrs) == 0 and we're not aborting.
+	}
+
+	// Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined.
+	for _, addr := range addrs {
+		for _, ipnet := range config.InsecureRegistryCIDRs {
+			// check if the addr falls in the subnet
+			if (*net.IPNet)(ipnet).Contains(addr) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
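To illustrate how the insecure-registry settings interact (a sketch within this package; the registry values are hypothetical): CIDR entries match by resolved IP, host:port entries match by exact IndexConfigs lookup, and everything else is secure by default.

	// makeServiceConfig is a test helper defined later in this patch.
	config := makeServiceConfig(nil, []string{"10.0.0.0/8", "myregistry:5000"})
	config.isSecureIndex("10.1.2.3:5000")   // false: the IP falls inside an insecure CIDR
	config.isSecureIndex("myregistry:5000") // false: exact IndexConfigs entry
	config.isSecureIndex("docker.io")       // true: the official index is configured as secure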
+// ValidateMirror validates an HTTP(S) registry mirror
+func ValidateMirror(val string) (string, error) {
+	uri, err := url.Parse(val)
+	if err != nil {
+		return "", fmt.Errorf("%s is not a valid URI", val)
+	}
+
+	if uri.Scheme != "http" && uri.Scheme != "https" {
+		return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme)
+	}
+
+	if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" {
+		return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI")
+	}
+
+	return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil
+}
+
+// ValidateIndexName validates an index name.
+func ValidateIndexName(val string) (string, error) {
+	// 'index.docker.io' => 'docker.io'
+	if val == "index."+IndexName {
+		val = IndexName
+	}
+	if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
+		return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val)
+	}
+	// *TODO: Check if valid hostname[:port]/ip[:port]?
+	return val, nil
+}
+
+func validateRemoteName(remoteName string) error {
+	if !strings.Contains(remoteName, "/") {
+		// the repository name must not be a valid image ID
+		if err := image.ValidateID(remoteName); err == nil {
+			return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", remoteName)
+		}
+	}
+
+	return v2.ValidateRepositoryName(remoteName)
+}
+
+func validateNoSchema(reposName string) error {
+	if strings.Contains(reposName, "://") {
+		// It cannot contain a scheme!
+		return ErrInvalidRepositoryName
+	}
+	return nil
+}
+
+// ValidateRepositoryName validates a repository name
+func ValidateRepositoryName(reposName string) error {
+	var err error
+	if err = validateNoSchema(reposName); err != nil {
+		return err
+	}
+	indexName, remoteName := splitReposName(reposName)
+	if _, err = ValidateIndexName(indexName); err != nil {
+		return err
+	}
+	return validateRemoteName(remoteName)
+}
+
+// NewIndexInfo returns IndexInfo configuration from indexName
+func (config *ServiceConfig) NewIndexInfo(indexName string) (*IndexInfo, error) {
+	var err error
+	indexName, err = ValidateIndexName(indexName)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return any configured index info, first.
+	if index, ok := config.IndexConfigs[indexName]; ok {
+		return index, nil
+	}
+
+	// Construct a non-configured index info.
+	index := &IndexInfo{
+		Name:     indexName,
+		Mirrors:  make([]string, 0),
+		Official: false,
+	}
+	index.Secure = config.isSecureIndex(indexName)
+	return index, nil
+}
+// GetAuthConfigKey special-cases using the full index address of the official
+// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
+func (index *IndexInfo) GetAuthConfigKey() string {
+	if index.Official {
+		return IndexServer
+	}
+	return index.Name
+}
+
+// splitReposName breaks a reposName into an index name and remote name
+func splitReposName(reposName string) (string, string) {
+	nameParts := strings.SplitN(reposName, "/", 2)
+	var indexName, remoteName string
+	if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
+		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
+		// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
+		// 'docker.io'
+		indexName = IndexName
+		remoteName = reposName
+	} else {
+		indexName = nameParts[0]
+		remoteName = nameParts[1]
+	}
+	return indexName, remoteName
+}
+
+// NewRepositoryInfo validates and breaks down a repository name into a RepositoryInfo
+func (config *ServiceConfig) NewRepositoryInfo(reposName string) (*RepositoryInfo, error) {
+	if err := validateNoSchema(reposName); err != nil {
+		return nil, err
+	}
+
+	indexName, remoteName := splitReposName(reposName)
+	if err := validateRemoteName(remoteName); err != nil {
+		return nil, err
+	}
+
+	repoInfo := &RepositoryInfo{
+		RemoteName: remoteName,
+	}
+
+	var err error
+	repoInfo.Index, err = config.NewIndexInfo(indexName)
+	if err != nil {
+		return nil, err
+	}
+
+	if repoInfo.Index.Official {
+		normalizedName := repoInfo.RemoteName
+		if strings.HasPrefix(normalizedName, "library/") {
+			// If pull "library/foo", it's stored locally under "foo"
+			normalizedName = strings.SplitN(normalizedName, "/", 2)[1]
+		}
+
+		repoInfo.LocalName = normalizedName
+		repoInfo.RemoteName = normalizedName
+		// If the normalized name does not contain a '/' (e.g. "foo")
+		// then it is an official repo.
+		if strings.IndexRune(normalizedName, '/') == -1 {
+			repoInfo.Official = true
+			// Fix up remote name for official repos.
+			repoInfo.RemoteName = "library/" + normalizedName
+		}
+
+		repoInfo.CanonicalName = "docker.io/" + repoInfo.RemoteName
+	} else {
+		repoInfo.LocalName = repoInfo.Index.Name + "/" + repoInfo.RemoteName
+		repoInfo.CanonicalName = repoInfo.LocalName
+	}
+
+	return repoInfo, nil
+}
+
+// GetSearchTerm special-cases using local name for official index, and
+// remote name for private indexes.
+func (repoInfo *RepositoryInfo) GetSearchTerm() string {
+	if repoInfo.Index.Official {
+		return repoInfo.LocalName
+	}
+	return repoInfo.RemoteName
+}
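The name handling above can be summarized with a few examples (a sketch using ParseRepositoryInfo, defined just below; results follow the rules in NewRepositoryInfo):

	info, _ := ParseRepositoryInfo("ubuntu")
	// Index "docker.io", RemoteName "library/ubuntu", LocalName "ubuntu",
	// CanonicalName "docker.io/library/ubuntu", Official true

	info, _ = ParseRepositoryInfo("samalba/hipache")
	// Index "docker.io", RemoteName "samalba/hipache", LocalName "samalba/hipache", Official false

	info, _ = ParseRepositoryInfo("localhost:5000/foo/bar")
	// Index "localhost:5000", RemoteName "foo/bar", LocalName "localhost:5000/foo/bar"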
+// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but
+// lacks registry configuration.
+func ParseRepositoryInfo(reposName string) (*RepositoryInfo, error) {
+	return emptyServiceConfig.NewRepositoryInfo(reposName)
+}
+
+// NormalizeLocalName transforms a repository name into a normalized LocalName
+// Passes through the name without transformation on error (image id, etc)
+func NormalizeLocalName(name string) string {
+	repoInfo, err := ParseRepositoryInfo(name)
+	if err != nil {
+		return name
+	}
+	return repoInfo.LocalName
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config_test.go b/Godeps/_workspace/src/github.com/docker/docker/registry/config_test.go
new file mode 100644
index 00000000..25578a7f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/config_test.go
@@ -0,0 +1,49 @@
+package registry
+
+import (
+	"testing"
+)
+
+func TestValidateMirror(t *testing.T) {
+	valid := []string{
+		"http://mirror-1.com",
+		"https://mirror-1.com",
+		"http://localhost",
+		"https://localhost",
+		"http://localhost:5000",
+		"https://localhost:5000",
+		"http://127.0.0.1",
+		"https://127.0.0.1",
+		"http://127.0.0.1:5000",
+		"https://127.0.0.1:5000",
+	}
+
+	invalid := []string{
+		"!invalid!://%as%",
+		"ftp://mirror-1.com",
+		"http://mirror-1.com/",
+		"http://mirror-1.com/?q=foo",
+		"http://mirror-1.com/v1/",
+		"http://mirror-1.com/v1/?q=foo",
+		"http://mirror-1.com/v1/?q=foo#frag",
+		"http://mirror-1.com?q=foo",
+		"https://mirror-1.com#frag",
+		"https://mirror-1.com/",
+		"https://mirror-1.com/#frag",
+		"https://mirror-1.com/v1/",
+		"https://mirror-1.com/v1/#",
+		"https://mirror-1.com?q",
+	}
+
+	for _, address := range valid {
+		if ret, err := ValidateMirror(address); err != nil || ret == "" {
+			t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err)
+		}
+	}
+
+	for _, address := range invalid {
+		if ret, err := ValidateMirror(address); err == nil || ret != "" {
+			t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go b/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go
new file mode 100644
index 00000000..b7aaedaa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go
@@ -0,0 +1,277 @@
+package registry
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+)
+
+// for mocking in unit tests
+var lookupIP = net.LookupIP
+
+// scanForAPIVersion scans a string for an API version in the URL path;
+// it returns the trimmed address and, if one was found, the API version.
+func scanForAPIVersion(address string) (string, APIVersion) {
+	var (
+		chunks        []string
+		apiVersionStr string
+	)
+
+	if strings.HasSuffix(address, "/") {
+		address = address[:len(address)-1]
+	}
+
+	chunks = strings.Split(address, "/")
+	apiVersionStr = chunks[len(chunks)-1]
+
+	for k, v := range apiVersions {
+		if apiVersionStr == v {
+			address = strings.Join(chunks[:len(chunks)-1], "/")
+			return address, k
+		}
+	}
+
+	return address, APIVersionUnknown
+}
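scanForAPIVersion only strips a trailing path component that matches a known entry in apiVersions (assumed to be defined elsewhere in this package, mapping APIVersion1 to "v1" and APIVersion2 to "v2"); anything else is left intact. A quick sketch:

	addr, ver := scanForAPIVersion("https://registry.example.com/v2/")
	// addr == "https://registry.example.com", ver == APIVersion2

	addr, ver = scanForAPIVersion("https://registry.example.com")
	// addr unchanged, ver == APIVersionUnknown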
+// NewEndpoint parses the given address to return a registry endpoint.
+func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) {
+	tlsConfig, err := newTLSConfig(index.Name, index.Secure)
+	if err != nil {
+		return nil, err
+	}
+	endpoint, err := newEndpoint(index.GetAuthConfigKey(), tlsConfig, metaHeaders)
+	if err != nil {
+		return nil, err
+	}
+	if err := validateEndpoint(endpoint); err != nil {
+		return nil, err
+	}
+
+	return endpoint, nil
+}
+
+func validateEndpoint(endpoint *Endpoint) error {
+	logrus.Debugf("pinging registry endpoint %s", endpoint)
+
+	// Try HTTPS ping to registry
+	endpoint.URL.Scheme = "https"
+	if _, err := endpoint.Ping(); err != nil {
+		if endpoint.IsSecure {
+			// If registry is secure and HTTPS failed, show the user the error and tell them about `--insecure-registry`
+			// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP.
+			return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
+		}
+
+		// If registry is insecure and HTTPS failed, fall back to HTTP.
+		logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
+		endpoint.URL.Scheme = "http"
+
+		var err2 error
+		if _, err2 = endpoint.Ping(); err2 == nil {
+			return nil
+		}
+
+		return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
+	}
+
+	return nil
+}
+
+func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) {
+	var (
+		endpoint       = new(Endpoint)
+		trimmedAddress string
+		err            error
+	)
+
+	if !strings.HasPrefix(address, "http") {
+		address = "https://" + address
+	}
+
+	endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify)
+
+	trimmedAddress, endpoint.Version = scanForAPIVersion(address)
+
+	if endpoint.URL, err = url.Parse(trimmedAddress); err != nil {
+		return nil, err
+	}
+
+	// TODO(tiborvass): make sure a ConnectTimeout transport is used
+	tr := NewTransport(tlsConfig)
+	endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...))
+	return endpoint, nil
+}
+
+// GetEndpoint returns a new endpoint with the specified headers
+func (repoInfo *RepositoryInfo) GetEndpoint(metaHeaders http.Header) (*Endpoint, error) {
+	return NewEndpoint(repoInfo.Index, metaHeaders)
+}
+
+// Endpoint stores basic information about a registry endpoint.
+type Endpoint struct {
+	client         *http.Client
+	URL            *url.URL
+	Version        APIVersion
+	IsSecure       bool
+	AuthChallenges []*AuthorizationChallenge
+	URLBuilder     *v2.URLBuilder
+}
+
+// String returns the formatted URL for the root of this registry Endpoint
+func (e *Endpoint) String() string {
+	return fmt.Sprintf("%s/v%d/", e.URL, e.Version)
+}
+
+// VersionString returns a formatted string of this
+// endpoint address using the given API Version.
+func (e *Endpoint) VersionString(version APIVersion) string {
+	return fmt.Sprintf("%s/v%d/", e.URL, version)
+}
+
+// Path returns a formatted string for the URL
+// of this endpoint with the given path appended.
+func (e *Endpoint) Path(path string) string {
+	return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path)
+}
+// Ping pings the remote endpoint with v2 and v1 pings to determine the API
+// version. It returns a PingResult containing the discovered version. The
+// PingResult also indicates whether the registry is standalone or not.
+func (e *Endpoint) Ping() (PingResult, error) {
+	// The ping logic to use is determined by the registry endpoint version.
+	switch e.Version {
+	case APIVersion1:
+		return e.pingV1()
+	case APIVersion2:
+		return e.pingV2()
+	}
+
+	// APIVersionUnknown
+	// We should try v2 first...
+	e.Version = APIVersion2
+	regInfo, errV2 := e.pingV2()
+	if errV2 == nil {
+		return regInfo, nil
+	}
+
+	// ... then fall back to v1.
+	e.Version = APIVersion1
+	regInfo, errV1 := e.pingV1()
+	if errV1 == nil {
+		return regInfo, nil
+	}
+
+	e.Version = APIVersionUnknown
+	return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1)
+}
+
+func (e *Endpoint) pingV1() (PingResult, error) {
+	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
+
+	if e.String() == IndexServer {
+		// Skip the check, we know this one is valid
+		// (and we never want to fall back to http in case of error)
+		return PingResult{Standalone: false}, nil
+	}
+
+	req, err := http.NewRequest("GET", e.Path("_ping"), nil)
+	if err != nil {
+		return PingResult{Standalone: false}, err
+	}
+
+	resp, err := e.client.Do(req)
+	if err != nil {
+		return PingResult{Standalone: false}, err
+	}
+
+	defer resp.Body.Close()
+
+	jsonString, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
+	}
+
+	// If the header is absent, we assume true for compatibility with earlier
+	// versions of the registry; default to true.
+	info := PingResult{
+		Standalone: true,
+	}
+	if err := json.Unmarshal(jsonString, &info); err != nil {
+		logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err)
+		// don't stop here. Just assume sane defaults
+	}
+	if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
+		logrus.Debugf("Registry version header: '%s'", hdr)
+		info.Version = hdr
+	}
+	logrus.Debugf("PingResult.Version: %q", info.Version)
+
+	standalone := resp.Header.Get("X-Docker-Registry-Standalone")
+	logrus.Debugf("Registry standalone header: '%s'", standalone)
+	// Accepted values are "true" (case-insensitive) and "1".
+	if strings.EqualFold(standalone, "true") || standalone == "1" {
+		info.Standalone = true
+	} else if len(standalone) > 0 {
+		// there is a header set, and it is not "true" or "1", so assume not standalone
+		info.Standalone = false
+	}
+	logrus.Debugf("PingResult.Standalone: %t", info.Standalone)
+	return info, nil
+}
+
+func (e *Endpoint) pingV2() (PingResult, error) {
+	logrus.Debugf("attempting v2 ping for registry endpoint %s", e)
+
+	req, err := http.NewRequest("GET", e.Path(""), nil)
+	if err != nil {
+		return PingResult{}, err
+	}
+
+	resp, err := e.client.Do(req)
+	if err != nil {
+		return PingResult{}, err
+	}
+	defer resp.Body.Close()
+
+	// The endpoint may have multiple supported versions.
+	// Ensure it supports the v2 Registry API.
+	var supportsV2 bool
+
+HeaderLoop:
+	for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] {
+		for _, versionName := range strings.Fields(supportedVersions) {
+			if versionName == "registry/2.0" {
+				supportsV2 = true
+				break HeaderLoop
+			}
+		}
+	}
+
+	if !supportsV2 {
+		return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e)
+	}
+
+	if resp.StatusCode == http.StatusOK {
+		// It would seem that no authentication/authorization is required.
+		// So we don't need to parse/add any authorization schemes.
+		return PingResult{Standalone: true}, nil
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+		e.AuthChallenges = parseAuthHeader(resp.Header)
+		return PingResult{}, nil
+	}
+
+	return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode))
+}
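Taken together, NewEndpoint, validateEndpoint, and Ping implement the full probe: HTTPS before HTTP, v2 before v1, and an HTTP fallback only for registries marked insecure. A usage sketch (the hostname is hypothetical, and a real call would need a reachable registry):

	ep, err := registry.NewEndpoint(&registry.IndexInfo{Name: "registry.example.com", Secure: true}, nil)
	if err != nil {
		log.Fatal(err) // e.g. an unknown CA without --insecure-registry
	}
	fmt.Println(ep.Version) // APIVersion2 or APIVersion1, as negotiated by Ping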
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint_test.go b/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint_test.go
new file mode 100644
index 00000000..ee301dbd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint_test.go
@@ -0,0 +1,93 @@
+package registry
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+)
+
+func TestEndpointParse(t *testing.T) {
+	testData := []struct {
+		str      string
+		expected string
+	}{
+		{IndexServer, IndexServer},
+		{"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"},
+		{"http://0.0.0.0:5000/v2/", "http://0.0.0.0:5000/v2/"},
+		{"http://0.0.0.0:5000", "http://0.0.0.0:5000/v0/"},
+		{"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"},
+	}
+	for _, td := range testData {
+		e, err := newEndpoint(td.str, nil, nil)
+		if err != nil {
+			t.Errorf("%q: %s", td.str, err)
+		}
+		if e == nil {
+			t.Logf("something's fishy, endpoint for %q is nil", td.str)
+			continue
+		}
+		if e.String() != td.expected {
+			t.Errorf("expected %q, got %q", td.expected, e.String())
+		}
+	}
+}
+
+// Ensure that a registry endpoint that responds with a 401 only is determined
+// to be a v1 registry unless it includes a valid v2 API header.
+func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) {
+	requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`)
+		w.WriteHeader(http.StatusUnauthorized)
+	})
+
+	requireBasicAuthHandlerV2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// This mock server supports v2.0, v2.1, v42.0, and v100.0
+		w.Header().Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0")
+		w.Header().Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1")
+		requireBasicAuthHandler.ServeHTTP(w, r)
+	})
+
+	// Make a test server which should validate as a v1 server.
+	testServer := httptest.NewServer(requireBasicAuthHandler)
+	defer testServer.Close()
+
+	testServerURL, err := url.Parse(testServer.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testEndpoint := Endpoint{
+		URL:     testServerURL,
+		Version: APIVersionUnknown,
+		client:  HTTPClient(NewTransport(nil)),
+	}
+
+	if err = validateEndpoint(&testEndpoint); err != nil {
+		t.Fatal(err)
+	}
+
+	if testEndpoint.Version != APIVersion1 {
+		t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion1, testEndpoint.Version)
+	}
+
+	// Make a test server which should validate as a v2 server.
+	testServer = httptest.NewServer(requireBasicAuthHandlerV2)
+	defer testServer.Close()
+
+	testServerURL, err = url.Parse(testServer.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testEndpoint.URL = testServerURL
+	testEndpoint.Version = APIVersionUnknown
+
+	if err = validateEndpoint(&testEndpoint); err != nil {
+		t.Fatal(err)
+	}
+
+	if testEndpoint.Version != APIVersion2 {
+		t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion2, testEndpoint.Version)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go b/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go
new file mode 100644
index 00000000..e15f83ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go
@@ -0,0 +1,68 @@
+package registry
+
+import (
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
+// Reference represents a tag or digest within a repository
+type Reference interface {
+	// HasDigest returns whether the reference has a verifiable
+	// content addressable reference which may be considered secure.
+	HasDigest() bool
+
+	// ImageName returns an image name for the given repository
+	ImageName(string) string
+
+	// Returns a string representation of the reference
+	String() string
+}
+
+type tagReference struct {
+	tag string
+}
+
+func (tr tagReference) HasDigest() bool {
+	return false
+}
+
+func (tr tagReference) ImageName(repo string) string {
+	return repo + ":" + tr.tag
+}
+
+func (tr tagReference) String() string {
+	return tr.tag
+}
+
+type digestReference struct {
+	digest digest.Digest
+}
+
+func (dr digestReference) HasDigest() bool {
+	return true
+}
+
+func (dr digestReference) ImageName(repo string) string {
+	return repo + "@" + dr.String()
+}
+
+func (dr digestReference) String() string {
+	return dr.digest.String()
+}
+
+// ParseReference parses a reference into either a digest or tag reference
+func ParseReference(ref string) Reference {
+	if strings.Contains(ref, ":") {
+		dgst, err := digest.ParseDigest(ref)
+		if err == nil {
+			return digestReference{digest: dgst}
+		}
+	}
+	return tagReference{tag: ref}
+}
+
+// DigestReference creates a digest reference using a digest
+func DigestReference(dgst digest.Digest) Reference {
+	return digestReference{digest: dgst}
+}
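ParseReference treats anything that parses as a valid digest as content-addressable and everything else as a tag, which determines how the image name is rendered. A short sketch (the digest shown is the sha256 of the empty string):

	registry.ParseReference("latest").ImageName("busybox")
	// "busybox:latest"

	registry.ParseReference("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").ImageName("busybox")
	// "busybox@sha256:e3b0..."; HasDigest() is true for this one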
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go b/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go
new file mode 100644
index 00000000..74f731bd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go
@@ -0,0 +1,237 @@
+package registry
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/tlsconfig"
+	"github.com/docker/docker/pkg/useragent"
+)
+
+var (
+	// ErrAlreadyExists is an error returned if an image being pushed
+	// already exists on the remote side
+	ErrAlreadyExists = errors.New("Image already exists")
+	errLoginRequired = errors.New("Authentication is required.")
+)
+
+// dockerUserAgent is the User-Agent the Docker client uses to identify itself.
+// It is populated on init(), comprising version information of different components.
+var dockerUserAgent string
+
+func init() {
+	httpVersion := make([]useragent.VersionInfo, 0, 6)
+	httpVersion = append(httpVersion, useragent.VersionInfo{"docker", dockerversion.VERSION})
+	httpVersion = append(httpVersion, useragent.VersionInfo{"go", runtime.Version()})
+	httpVersion = append(httpVersion, useragent.VersionInfo{"git-commit", dockerversion.GITCOMMIT})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, useragent.VersionInfo{"kernel", kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, useragent.VersionInfo{"os", runtime.GOOS})
+	httpVersion = append(httpVersion, useragent.VersionInfo{"arch", runtime.GOARCH})
+
+	dockerUserAgent = useragent.AppendVersions("", httpVersion...)
+}
+
+func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
+	// PreferredServerCipherSuites should have no effect
+	tlsConfig := tlsconfig.ServerDefault
+
+	tlsConfig.InsecureSkipVerify = !isSecure
+
+	if isSecure {
+		hostDir := filepath.Join(CertsDir, hostname)
+		logrus.Debugf("hostDir: %s", hostDir)
+		if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil {
+			return nil, err
+		}
+	}
+
+	return &tlsConfig, nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+	for _, f := range files {
+		if f.Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// ReadCertsDirectory reads the directory for TLS certificates
+// including roots and certificate pairs and updates the
+// provided TLS configuration.
+func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
+	fs, err := ioutil.ReadDir(directory)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	for _, f := range fs {
+		if strings.HasSuffix(f.Name(), ".crt") {
+			if tlsConfig.RootCAs == nil {
+				// TODO(dmcgowan): Copy system pool
+				tlsConfig.RootCAs = x509.NewCertPool()
+			}
+			logrus.Debugf("crt: %s", filepath.Join(directory, f.Name()))
+			data, err := ioutil.ReadFile(filepath.Join(directory, f.Name()))
+			if err != nil {
+				return err
+			}
+			tlsConfig.RootCAs.AppendCertsFromPEM(data)
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			certName := f.Name()
+			keyName := certName[:len(certName)-5] + ".key"
+			logrus.Debugf("cert: %s", filepath.Join(directory, f.Name()))
+			if !hasFile(fs, keyName) {
+				return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName))
+			if err != nil {
+				return err
+			}
+			tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			logrus.Debugf("key: %s", filepath.Join(directory, f.Name()))
+			if !hasFile(fs, certName) {
+				return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+
+	return nil
+}
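ReadCertsDirectory implements the /etc/docker/certs.d contract: *.crt files become additional root CAs, and *.cert/*.key pairs become client certificates. A sketch of loading them manually the way newTLSConfig does (the directory name is hypothetical):

	tlsCfg := tlsconfig.ServerDefault
	if err := registry.ReadCertsDirectory(&tlsCfg, "/etc/docker/certs.d/myregistry:5000"); err != nil {
		log.Fatal(err) // e.g. a .cert file without its matching .key
	}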
+// DockerHeaders returns request modifiers that ensure requests have
+// the User-Agent header set to dockerUserAgent and that metaHeaders
+// are added.
+func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {
+	modifiers := []transport.RequestModifier{
+		transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}),
+	}
+	if metaHeaders != nil {
+		modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
+	}
+	return modifiers
+}
+
+// HTTPClient returns a HTTP client structure which uses the given transport
+// and contains the necessary headers for redirected requests
+func HTTPClient(transport http.RoundTripper) *http.Client {
+	return &http.Client{
+		Transport:     transport,
+		CheckRedirect: addRequiredHeadersToRedirectedRequests,
+	}
+}
+
+func trustedLocation(req *http.Request) bool {
+	var (
+		trusteds = []string{"docker.com", "docker.io"}
+		hostname = strings.SplitN(req.Host, ":", 2)[0]
+	)
+	if req.URL.Scheme != "https" {
+		return false
+	}
+
+	for _, trusted := range trusteds {
+		if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) {
+			return true
+		}
+	}
+	return false
+}
+
+// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers
+// for redirected requests
+func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error {
+	if via != nil && via[0] != nil {
+		if trustedLocation(req) && trustedLocation(via[0]) {
+			req.Header = via[0].Header
+			return nil
+		}
+		for k, v := range via[0].Header {
+			if k != "Authorization" {
+				for _, vv := range v {
+					req.Header.Add(k, vv)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func shouldV2Fallback(err errcode.Error) bool {
+	logrus.Debugf("v2 error: %T %v", err, err)
+	switch err.Code {
+	case v2.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown:
+		return true
+	}
+	return false
+}
+
+// ErrNoSupport is an error type used for errors indicating that an operation
+// is not supported. It encapsulates a more specific error.
+type ErrNoSupport struct{ Err error }
+
+func (e ErrNoSupport) Error() string {
+	if e.Err == nil {
+		return "not supported"
+	}
+	return e.Err.Error()
+}
+
+// ContinueOnError returns true if we should fallback to the next endpoint
+// as a result of this error.
+func ContinueOnError(err error) bool {
+	switch v := err.(type) {
+	case errcode.Errors:
+		return ContinueOnError(v[0])
+	case ErrNoSupport:
+		return ContinueOnError(v.Err)
+	case errcode.Error:
+		return shouldV2Fallback(v)
+	}
+	return false
+}
+
+// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the
+// default TLS configuration.
+func NewTransport(tlsConfig *tls.Config) *http.Transport {
+	if tlsConfig == nil {
+		var cfg = tlsconfig.ServerDefault
+		tlsConfig = &cfg
+	}
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tlsConfig,
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
+		DisableKeepAlives: true,
+	}
+}
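These helpers are meant to be composed; the session setup in registry_test.go later in this patch does exactly this. A condensed sketch of the composition:

	base := registry.NewTransport(nil) // default TLS config, keep-alives disabled
	tr := transport.NewTransport(base, registry.DockerHeaders(nil)...)
	client := registry.HTTPClient(tr) // installs the redirect header policy
	resp, err := client.Get("https://registry-1.docker.io/v2/")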
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go b/Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go
new file mode 100644
index 00000000..fb19e577
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go
@@ -0,0 +1,476 @@
+package registry
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/opts"
+	"github.com/gorilla/mux"
+
+	"github.com/Sirupsen/logrus"
+)
+
+var (
+	testHTTPServer  *httptest.Server
+	testHTTPSServer *httptest.Server
+	testLayers      = map[string]map[string]string{
+		"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": {
+			"json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+				"comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00",
+				"container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0,
+				"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,
+				"Tty":false,"OpenStdin":false,"StdinOnce":false,
+				"Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null,
+				"VolumesFrom":"","Entrypoint":null},"Size":424242}`,
+			"checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
+			"checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c",
+			"ancestry":        `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`,
+			"layer": string([]byte{
+				0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65,
+				0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05,
+				0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66,
+				0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78,
+				0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31,
+				0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8,
+				0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1,
+				0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6,
+				0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb,
+				0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce,
+				0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00,
+			}),
+		},
+		"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": {
+			"json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d",
+				"parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+				"comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00",
+				"container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0,
+				"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,
"Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: 
no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *IndexInfo { + index := &IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *IndexInfo { + index := &IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *IndexInfo { + index := &IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *ServiceConfig { + options := &Options{ + Mirrors: opts.NewListOpts(nil), + InsecureRegistries: opts.NewListOpts(nil), + } + if mirrors != nil { + for _, mirror := range mirrors { + options.Mirrors.Set(mirror) + } + } + if insecureRegistries != nil { + for _, insecureRegistries := range insecureRegistries { + options.InsecureRegistries.Set(insecureRegistries) + } + } + + return NewServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = 
fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + imageID := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[imageID] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[imageID] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { + apiError(w, "Wrong checksum", 400) + return + } + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + apiError(w, fmt.Sprintf("Error: %s", err), 500) + return + } + layer[action] = string(body) + writeResponse(w, true, 200) +} + +func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + repositoryName := mux.Vars(r)["repository"] + repositoryName = NormalizeLocalName(repositoryName) + tags, exists := testRepositories[repositoryName] + if !exists { + apiError(w, "Repository not found", 404) + return + } + if r.Method == "DELETE" { + delete(testRepositories, repositoryName) + writeResponse(w, true, 200) + return + } + writeResponse(w, tags, 200) +} + +func handlerGetTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName := vars["repository"] + repositoryName = NormalizeLocalName(repositoryName) + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName] + if !exists { + apiError(w, "Repository not found", 404) + return + } + tag, exists := tags[tagName] + if !exists { + apiError(w, "Tag not found", 404) + return + } + writeResponse(w, tag, 200) +} + +func handlerPutTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName := vars["repository"] + repositoryName = NormalizeLocalName(repositoryName) + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName] + if !exists { + tags := make(map[string]string) + testRepositories[repositoryName] = tags + } + tagValue := "" + readJSON(r, tagValue) + tags[tagName] = tagValue + writeResponse(w, true, 200) +} + +func handlerUsers(w http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if 
strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := &SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go b/Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go new file mode 100644 index 00000000..88b08dff --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go @@ -0,0 +1,918 @@ +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &cliconfig.AuthConfig{} + endpoint, err := NewEndpoint(makeIndex("/v1/"), nil) + if err != nil { + t.Fatal(err) + } + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. 
+ r.client.Transport.(*authTransport).token = token + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := NewEndpoint(index, nil) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + + assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *IndexInfo) *Endpoint { + endpoint, err := NewEndpoint(index, nil) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *IndexInfo) { + index.Secure = true + _, err := NewEndpoint(index, nil) + assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") + assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *IndexInfo) { + index.Secure = true + _, err := NewEndpoint(index, nil) + assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") + assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := &IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertInsecureIndex(index) + + httpURL := makeURL("") + index.Name = strings.SplitN(httpURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertInsecureIndex(index) + + index.Name = makeHTTPSURL("/v1/") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertSecureIndex(index) + + index.Name = makeHTTPSURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertSecureIndex(index) + + httpsURL := makeHTTPSURL("") + index.Name = strings.SplitN(httpsURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") + if endpoint.Version != APIVersion1 { + t.Fatal("Expected endpoint to be v1") + } + assertSecureIndex(index) + + badEndpoints := []string{ + "http://127.0.0.1/v1/", + "https://127.0.0.1/v1/", + 
"http://127.0.0.1", + "https://127.0.0.1", + "127.0.0.1", + } + for _, address := range badEndpoints { + index.Name = address + _, err := NewEndpoint(index, nil) + checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") + } +} + +func TestGetRemoteHistory(t *testing.T) { + r := spawnTestRegistrySession(t) + hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(hist), 2, "Expected 2 images in history") + assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") + assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "Unexpected second ancestry") +} + +func TestLookupRemoteImage(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.LookupRemoteImage(imageID, makeURL("/v1/")) + assertEqual(t, err, nil, "Expected error of remote lookup to nil") + if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { + t.Fatal("Expected error of remote lookup to not nil") + } +} + +func TestGetRemoteImageJSON(t *testing.T) { + r := spawnTestRegistrySession(t) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, size, 154, "Expected size 154") + if len(json) <= 0 { + t.Fatal("Expected non-empty json") + } + + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteImageLayer(t *testing.T) { + r := spawnTestRegistrySession(t) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) + if err != nil { + t.Fatal(err) + } + if data == nil { + t.Fatal("Expected non-nil data result") + } + + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteTag(t *testing.T) { + r := spawnTestRegistrySession(t) + tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, REPO, "test") + if err != nil { + t.Fatal(err) + } + assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) + + _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, "foo42/baz", "foo") + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") + } +} + +func TestGetRemoteTags(t *testing.T) { + r := spawnTestRegistrySession(t) + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(tags), 2, "Expected two tags") + assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) + assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) + + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz") + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") + } +} + +func TestGetRepositoryData(t *testing.T) { + r := spawnTestRegistrySession(t) + parsedURL, err := url.Parse(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + host := "http://" + parsedURL.Host + "/v1/" + data, err := r.GetRepositoryData("foo42/bar") + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], 
"http://test.example.com/v1/", + fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + +} + +func TestPushImageJSONRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := &ImgData{ + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + } + + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageLayerRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + layer := strings.NewReader("") + _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestValidateRepositoryName(t *testing.T) { + validRepoNames := []string{ + "docker/docker", + "library/debian", + "debian", + "docker.io/docker/docker", + "docker.io/library/debian", + "docker.io/debian", + "index.docker.io/docker/docker", + "index.docker.io/library/debian", + "index.docker.io/debian", + "127.0.0.1:5000/docker/docker", + "127.0.0.1:5000/library/debian", + "127.0.0.1:5000/debian", + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + } + invalidRepoNames := []string{ + "https://github.com/docker/docker", + "docker/Docker", + "-docker", + "-docker/docker", + "-docker.io/docker/docker", + "docker///docker", + "docker.io/docker/Docker", + "docker.io/docker///docker", + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + } + + for _, name := range invalidRepoNames { + err := ValidateRepositoryName(name) + assertNotEqual(t, err, nil, "Expected invalid repo name: "+name) + } + + for _, name := range validRepoNames { + err := ValidateRepositoryName(name) + assertEqual(t, err, nil, "Expected valid repo name: "+name) + } + + err := ValidateRepositoryName(invalidRepoNames[0]) + assertEqual(t, err, ErrInvalidRepositoryName, "Expected ErrInvalidRepositoryName: "+invalidRepoNames[0]) +} + +func TestParseRepositoryInfo(t *testing.T) { + expectedRepoInfos := map[string]RepositoryInfo{ + "fooo/bar": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "fooo/bar", + LocalName: "fooo/bar", + CanonicalName: "docker.io/fooo/bar", + Official: false, + }, + "library/ubuntu": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "nonlibrary/ubuntu": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "nonlibrary/ubuntu", + LocalName: "nonlibrary/ubuntu", + CanonicalName: "docker.io/nonlibrary/ubuntu", + Official: false, + }, + "ubuntu": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "other/library": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "other/library", + LocalName: "other/library", + CanonicalName: "docker.io/other/library", + Official: false, + }, + "127.0.0.1:8000/private/moonbase": { + Index: &IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "127.0.0.1:8000/private/moonbase", + CanonicalName: "127.0.0.1:8000/private/moonbase", + Official: false, + }, + 
"127.0.0.1:8000/privatebase": { + Index: &IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "127.0.0.1:8000/privatebase", + CanonicalName: "127.0.0.1:8000/privatebase", + Official: false, + }, + "localhost:8000/private/moonbase": { + Index: &IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost:8000/private/moonbase", + CanonicalName: "localhost:8000/private/moonbase", + Official: false, + }, + "localhost:8000/privatebase": { + Index: &IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost:8000/privatebase", + CanonicalName: "localhost:8000/privatebase", + Official: false, + }, + "example.com/private/moonbase": { + Index: &IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com/private/moonbase", + CanonicalName: "example.com/private/moonbase", + Official: false, + }, + "example.com/privatebase": { + Index: &IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com/privatebase", + CanonicalName: "example.com/privatebase", + Official: false, + }, + "example.com:8000/private/moonbase": { + Index: &IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com:8000/private/moonbase", + CanonicalName: "example.com:8000/private/moonbase", + Official: false, + }, + "example.com:8000/privatebase": { + Index: &IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com:8000/privatebase", + CanonicalName: "example.com:8000/privatebase", + Official: false, + }, + "localhost/private/moonbase": { + Index: &IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost/private/moonbase", + CanonicalName: "localhost/private/moonbase", + Official: false, + }, + "localhost/privatebase": { + Index: &IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost/privatebase", + CanonicalName: "localhost/privatebase", + Official: false, + }, + IndexName + "/public/moonbase": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "index." + IndexName + "/public/moonbase": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "ubuntu-12.04-base": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + IndexName + "/ubuntu-12.04-base": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + "index." 
+ IndexName + "/ubuntu-12.04-base": { + Index: &IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + } + + for reposName, expectedRepoInfo := range expectedRepoInfos { + repoInfo, err := ParseRepositoryInfo(reposName) + if err != nil { + t.Error(err) + } else { + checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) + checkEqual(t, repoInfo.RemoteName, expectedRepoInfo.RemoteName, reposName) + checkEqual(t, repoInfo.LocalName, expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.CanonicalName, expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) + checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) + } + } +} + +func TestNewIndexInfo(t *testing.T) { + testIndexInfo := func(config *ServiceConfig, expectedIndexInfos map[string]*IndexInfo) { + for indexName, expectedIndexInfo := range expectedIndexInfos { + index, err := config.NewIndexInfo(indexName) + if err != nil { + t.Fatal(err) + } else { + checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") + checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") + checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") + checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") + } + } + } + + config := NewServiceConfig(nil) + noMirrors := []string{} + expectedIndexInfos := map[string]*IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} + config = makeServiceConfig(publicMirrors, []string{"example.com"}) + + expectedIndexInfos = map[string]*IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "index." 
+ IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + expectedIndexInfos = map[string]*IndexInfo{ + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery") + if err != nil { + t.Fatal(err) + } + if results == nil { + t.Fatal("Expected non-nil SearchResults object") + } + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") +} + +func TestValidRemoteName(t *testing.T) { + validRepositoryNames := []string{ + // Sanity check. + "docker/docker", + + // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + + // Allow embedded hyphens. + "docker-rules/docker", + + //Username doc and image name docker being tested. + "doc/docker", + + // single character names are now allowed. + "d/docker", + "jess/t", + } + for _, repositoryName := range validRepositoryNames { + if err := validateRemoteName(repositoryName); err != nil { + t.Errorf("Repository name should be valid: %v. 
Error: %v", repositoryName, err) + } + } + + invalidRepositoryNames := []string{ + // Disallow capital letters. + "docker/Docker", + + // Only allow one slash. + "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. + "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). + "____/____", + + "_docker/_docker", + + // Disallow consecutive hyphens. + "dock--er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if err := validateRemoteName(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == true { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == false { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + 
{"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := config.isSecureIndex(tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service.go b/Godeps/_workspace/src/github.com/docker/docker/registry/service.go new file mode 100644 index 00000000..f4ea42ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/service.go @@ -0,0 +1,206 @@ +package registry + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/tlsconfig" +) + +// Service is a registry service. It tracks configuration data such as a list +// of mirrors. +type Service struct { + Config *ServiceConfig +} + +// NewService returns a new instance of Service ready to be +// installed into an engine. +func NewService(options *Options) *Service { + return &Service{ + Config: NewServiceConfig(options), + } +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was sucessful. +// It can be used to verify the validity of a client's credentials. +func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { + addr := authConfig.ServerAddress + if addr == "" { + // Use the official registry address if not specified. 
+ addr = IndexServer + } + index, err := s.ResolveIndex(addr) + if err != nil { + return "", err + } + endpoint, err := NewEndpoint(index, nil) + if err != nil { + return "", err + } + authConfig.ServerAddress = endpoint.String() + return Login(authConfig, endpoint) +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) { + repoInfo, err := s.ResolveRepository(term) + if err != nil { + return nil, err + } + + // *TODO: Search multiple indexes. + endpoint, err := repoInfo.GetEndpoint(http.Header(headers)) + if err != nil { + return nil, err + } + r, err := NewSession(endpoint.client, authConfig, endpoint) + if err != nil { + return nil, err + } + return r.SearchRepositories(repoInfo.GetSearchTerm()) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) { + return s.Config.NewRepositoryInfo(name) +} + +// ResolveIndex takes an index name and returns the index info +func (s *Service) ResolveIndex(name string) (*IndexInfo, error) { + return s.Config.NewIndexInfo(name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL string + Version APIVersion + Official bool + TrimHostname bool + TLSConfig *tls.Config + VersionHeader string + Versions []auth.APIVersion +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) { + return newEndpoint(e.URL, e.TLSConfig, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *Service) TLSConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, s.Config.isSecureIndex(hostname)) +} + +func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) { + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + return s.TLSConfig(mirrorURL.Host) +} + +// LookupEndpoints creates a list of endpoints to try, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP.
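// A hedged usage sketch (assumes a configured *Service named svc; not part of
// the vendored file): callers walk the returned slice in order, so for an
// official image the mirrors come back first, then the v2 registry, then v1.
//
//	endpoints, err := svc.LookupEndpoints("docker.io/library/busybox")
//	if err != nil {
//		// handle error
//	}
//	for _, ep := range endpoints {
//		// try ep.URL with ep.Version; fall through to the next on failure
//	}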
+func (s *Service) LookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) { + var cfg = tlsconfig.ServerDefault + tlsConfig := &cfg + if strings.HasPrefix(repoName, DefaultNamespace+"/") { + // v2 mirrors + for _, mirror := range s.Config.Mirrors { + mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirror, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + // v1 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV1Registry, + Version: APIVersion1, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + return endpoints, nil + } + + slashIndex := strings.IndexRune(repoName, '/') + if slashIndex <= 0 { + return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) + } + hostname := repoName[:slashIndex] + + tlsConfig, err = s.TLSConfig(hostname) + if err != nil { + return nil, err + } + isSecure := !tlsConfig.InsecureSkipVerify + + v2Versions := []auth.APIVersion{ + { + Type: "registry", + Version: "2.0", + }, + } + endpoints = []APIEndpoint{ + { + URL: "https://" + hostname, + Version: APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, + VersionHeader: DefaultRegistryVersionHeader, + Versions: v2Versions, + }, + { + URL: "https://" + hostname, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if !isSecure { + endpoints = append(endpoints, APIEndpoint{ + URL: "http://" + hostname, + Version: APIVersion2, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + VersionHeader: DefaultRegistryVersionHeader, + Versions: v2Versions, + }, APIEndpoint{ + URL: "http://" + hostname, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/session.go b/Godeps/_workspace/src/github.com/docker/docker/registry/session.go new file mode 100644 index 00000000..9bec7c1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/session.go @@ -0,0 +1,760 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *cliconfig.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *cliconfig.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq 
map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func AuthTransport(base http.RoundTripper, authConfig *cliconfig.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes a HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referer header as go http package adds said header. + // This is safe as Docker doesn't set Referer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. 
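// Editor's sketch of typical AuthTransport wiring (placeholder credentials;
// not part of the vendored file): wrap a base transport so every request to a
// private v1 registry carries Basic Auth.
//
//	authCfg := &cliconfig.AuthConfig{Username: "user", Password: "secret"}
//	client := &http.Client{
//		Transport: AuthTransport(http.DefaultTransport, authCfg, true),
//	}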
+func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) { + r = &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } + + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return nil, err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return nil, errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return r, nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). +func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
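// Editor's sketch of the intended call sequence (client, authCfg, and endpoint
// setup elided; not part of the vendored file). Note that v1 registry URLs are
// passed with a trailing slash, e.g. "https://registry.example.com/v1/".
//
//	sess, err := NewSession(client, authCfg, endpoint)
//	if err != nil {
//		// handle error
//	}
//	history, err := sess.GetRemoteHistory(imgID, "https://registry.example.com/v1/")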
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := -1 + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.Atoi(hdr) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + retries = 5 + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // TODO(tiborvass): why are we doing retries at this level? + // These retries should be generic to both v1 and v2 + for i := 1; i <= retries; i++ { + statusCode = 0 + res, err = r.client.Do(req) + if err == nil { + break + } + logrus.Debugf("Error contacting registry %s: %v", registry, err) + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + if i == retries { + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + time.Sleep(time.Duration(i) * 5 * time.Second) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debugf("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debugf("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repository string, askedTag string) (string, error) { + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. 
It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. It returns a map with +// tag names as the keys and image IDs as the values. +func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) { + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if strings.HasSuffix(err.Error(), "i/o timeout") { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errLoginRequired + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. 
+ if res.StatusCode == 404 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, 
&jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. 
+// Remote has the format '<user>/<repo>' +func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag) + + req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + req.ContentLength = int64(len(revision)) + res, err := r.client.Do(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) + } + return nil +} + +// PushImageJSONIndex uploads an image list to the repository +func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) + logrus.Debugf("[registry] PUT %s", u) + logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) + headers := map[string][]string{ + "Content-type": {"application/json"}, + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + "X-Docker-Token": {"true"}, + } + if validate { + headers["X-Docker-Endpoints"] = regs + } + + // Redirect if necessary + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { + return nil, err + } + if !shouldRedirect(res) { + break + } + res.Body.Close() + u = res.Header.Get("Location") + logrus.Debugf("Redirected to %s", u) + } + defer res.Body.Close() + + if res.StatusCode == 401 { + return nil, errLoginRequired + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res) + } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) + + if res.Header.Get("X-Docker-Endpoints") == "" { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res) + } + } + + return &RepositoryData{ + Endpoints: endpoints, + }, nil +} + +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.ContentLength = int64(len(body)) + for k, v := range
headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string) (*SearchResults, error) { + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +// GetAuthConfig returns the authentication settings for a session +// TODO(tiborvass): remove this once registry client v2 is vendored +func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig { + password := "" + if withPasswd { + password = r.authConfig.Password + } + return &cliconfig.AuthConfig{ + Username: r.authConfig.Username, + Password: password, + Email: r.authConfig.Email, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/token.go b/Godeps/_workspace/src/github.com/docker/docker/registry/token.go new file mode 100644 index 00000000..d91bd455 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/token.go @@ -0,0 +1,81 @@ +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +type tokenResponse struct { + Token string `json:"token"` +} + +func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) { + realm, ok := params["realm"] + if !ok { + return "", errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return "", fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + if realmURL.Scheme == "" { + if registryEndpoint.IsSecure { + realmURL.Scheme = "https" + } else { + realmURL.Scheme = "http" + } + } + + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return "", err + } + + reqParams := req.URL.Query() + service := params["service"] + scope := params["scope"] + + if service != "" { + reqParams.Add("service", service) + } + + for _, scopeField := range strings.Fields(scope) { + reqParams.Add("scope", scopeField) + } + + if username != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("token auth attempt for registry %s: %s request failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + decoder := json.NewDecoder(resp.Body) + + tr := new(tokenResponse) + if err = decoder.Decode(tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.Token == "" { + return 
"", errors.New("authorization server did not include a token in the response") + } + + return tr.Token, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/types.go b/Godeps/_workspace/src/github.com/docker/docker/registry/types.go new file mode 100644 index 00000000..09b9d571 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/types.go @@ -0,0 +1,140 @@ +package registry + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial indicates whether the result is an official repository or not + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsOfficial indicates whether the result is trusted + IsTrusted bool `json:"is_trusted"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the acutal results for the search + Results []SearchResult `json:"results"` +} + +// RepositoryData tracks the image list, list of endpoints, and list of tokens +// for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string + // Tokens is currently unused (remove it?) + Tokens []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in a HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +var apiVersions = map[APIVersion]string{ + 1: "v1", + 2: "v2", +} + +// API Version identifiers. 
+const ( + APIVersionUnknown = iota + APIVersion1 + APIVersion2 +) + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + // Index points to registry information + Index *IndexInfo + // RemoteName is the remote name of the repository, such as + // "library/ubuntu-12.04-base" + RemoteName string + // LocalName is the local name of the repository, such as + // "ubuntu-12.04-base" + LocalName string + // CanonicalName is the canonical name of the repository, such as + // "docker.io/library/ubuntu-12.04-base" + CanonicalName string + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. 
+ Official bool +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go index 1d969e9b..ebb8ead6 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go @@ -18,7 +18,6 @@ func Compare(a, b *Config) bool { if a.Cmd.Len() != b.Cmd.Len() || len(a.Env) != len(b.Env) || len(a.Labels) != len(b.Labels) || - len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || a.Entrypoint.Len() != b.Entrypoint.Len() || len(a.Volumes) != len(b.Volumes) { @@ -42,11 +41,6 @@ func Compare(a, b *Config) bool { return false } } - for i := 0; i < len(a.PortSpecs); i++ { - if a.PortSpecs[i] != b.PortSpecs[i] { - return false - } - } for k := range a.ExposedPorts { if _, exists := b.ExposedPorts[k]; !exists { return false diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare_test.go new file mode 100644 index 00000000..e59c3b2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare_test.go @@ -0,0 +1,124 @@ +package runconfig + +import ( + "testing" + + "github.com/docker/docker/pkg/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestCompare(t *testing.T) { + ports1 := make(nat.PortSet) + ports1[newPortNoError("tcp", "1111")] = struct{}{} + ports1[newPortNoError("tcp", "2222")] = struct{}{} + ports2 := make(nat.PortSet) + ports2[newPortNoError("tcp", "3333")] = struct{}{} + ports2[newPortNoError("tcp", "4444")] = struct{}{} + ports3 := make(nat.PortSet) + ports3[newPortNoError("tcp", "1111")] = struct{}{} + ports3[newPortNoError("tcp", "2222")] = struct{}{} + ports3[newPortNoError("tcp", "5555")] = struct{}{} + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + volumes3 := make(map[string]struct{}) + volumes3["/test1"] = struct{}{} + volumes3["/test3"] = struct{}{} + envs1 := []string{"ENV1=value1", "ENV2=value2"} + envs2 := []string{"ENV1=value1", "ENV3=value3"} + entrypoint1 := &Entrypoint{parts: []string{"/bin/sh", "-c"}} + entrypoint2 := &Entrypoint{parts: []string{"/bin/sh", "-d"}} + entrypoint3 := &Entrypoint{parts: []string{"/bin/sh", "-c", "echo"}} + cmd1 := &Command{parts: []string{"/bin/sh", "-c"}} + cmd2 := &Command{parts: []string{"/bin/sh", "-d"}} + cmd3 := &Command{parts: []string{"/bin/sh", "-c", "echo"}} + labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} + labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} + labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} + + sameConfigs := map[*Config]*Config{ + // Empty config + &Config{}: {}, + // Does not compare hostname, domainname & image + &Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user", + }: { + Hostname: "host2", + Domainname: "domain2", + Image: "image2", + User: "user", + }, + // only OpenStdin + &Config{OpenStdin: false}: {OpenStdin: false}, + // only env + &Config{Env: envs1}: {Env: envs1}, + // only cmd + &Config{Cmd: cmd1}: {Cmd: cmd1}, + // only labels + &Config{Labels: labels1}: {Labels: labels1}, + // only exposedPorts + &Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, + // only 
entrypoints + &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, + // only volumes + &Config{Volumes: volumes1}: {Volumes: volumes1}, + } + differentConfigs := map[*Config]*Config{ + nil: nil, + &Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user1", + }: { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user2", + }, + // only OpenStdin + &Config{OpenStdin: false}: {OpenStdin: true}, + &Config{OpenStdin: true}: {OpenStdin: false}, + // only env + &Config{Env: envs1}: {Env: envs2}, + // only cmd + &Config{Cmd: cmd1}: {Cmd: cmd2}, + // not the same number of parts + &Config{Cmd: cmd1}: {Cmd: cmd3}, + // only labels + &Config{Labels: labels1}: {Labels: labels2}, + // not the same number of labels + &Config{Labels: labels1}: {Labels: labels3}, + // only exposedPorts + &Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, + // not the same number of ports + &Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, + // only entrypoints + &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + // not the same number of parts + &Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + // only volumes + &Config{Volumes: volumes1}: {Volumes: volumes2}, + // not the same number of volumes + &Config{Volumes: volumes1}: {Volumes: volumes3}, + } + for config1, config2 := range sameConfigs { + if !Compare(config1, config2) { + t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) + } + } + for config1, config2 := range differentConfigs { + if Compare(config1, config2) { + t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go index 1d3d4acf..0312cc4c 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go @@ -5,7 +5,7 @@ import ( "io" "strings" - "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/nat" ) // Entrypoint encapsulates the container entrypoint. @@ -17,6 +17,8 @@ type Entrypoint struct { parts []string } +// MarshalJSON marshals (or serializes) the Entrypoint into JSON format. +// This method is needed to implement json.Marshaler. func (e *Entrypoint) MarshalJSON() ([]byte, error) { if e == nil { return []byte{}, nil @@ -24,7 +26,8 @@ func (e *Entrypoint) MarshalJSON() ([]byte, error) { return json.Marshal(e.Slice()) } -// UnmarshalJSON decoded the entrypoint whether it's a string or an array of strings. +// UnmarshalJSON decodes the entrypoint whether it's a string or an array of strings. +// This method is needed to implement json.Unmarshaler. func (e *Entrypoint) UnmarshalJSON(b []byte) error { if len(b) == 0 { return nil @@ -32,12 +35,17 @@ func (e *Entrypoint) UnmarshalJSON(b []byte) error { p := make([]string, 0, 1) if err := json.Unmarshal(b, &p); err != nil { - p = append(p, string(b)) + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) } e.parts = p return nil } +// Len returns the number of parts of the Entrypoint. func (e *Entrypoint) Len() int { if e == nil { return 0 @@ -45,6 +53,7 @@ func (e *Entrypoint) Len() int { return len(e.parts) } +// Slice gets the parts of the Entrypoint as a slice of strings.
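// Illustrative sketch (annotation, not part of the upstream patch) of the
// dual-form decoding UnmarshalJSON above provides: a JSON array and a bare
// JSON string both decode, but a bare string becomes a single part, unsplit:
//
//	var e1, e2 Entrypoint
//	_ = json.Unmarshal([]byte(`["/bin/sh","-c","echo"]`), &e1) // e1.Slice() == []string{"/bin/sh", "-c", "echo"}
//	_ = json.Unmarshal([]byte(`"/bin/sh -c echo"`), &e2)       // e2.Slice() == []string{"/bin/sh -c echo"}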
func (e *Entrypoint) Slice() []string { if e == nil { return nil @@ -52,18 +61,27 @@ func (e *Entrypoint) Slice() []string { return e.parts } +// NewEntrypoint creates an Entrypoint based on the specified parts (as strings). func NewEntrypoint(parts ...string) *Entrypoint { return &Entrypoint{parts} } +// Command encapsulates the container command. +// It might be represented as a string or an array of strings. +// We need to override the json decoder to accept both options. +// The JSON decoder will fail if the API sends a string and +// we try to decode it into an array of strings. type Command struct { parts []string } +// ToString gets a string representing a Command. func (e *Command) ToString() string { return strings.Join(e.parts, " ") } +// MarshalJSON marshals (or serializes) the Command into JSON format. +// This method is needed to implement json.Marshaler. func (e *Command) MarshalJSON() ([]byte, error) { if e == nil { return []byte{}, nil @@ -71,7 +89,8 @@ func (e *Command) MarshalJSON() ([]byte, error) { return json.Marshal(e.Slice()) } -// UnmarshalJSON decoded the entrypoint whether it's a string or an array of strings. +// UnmarshalJSON decodes the command whether it's a string or an array of strings. +// This method is needed to implement json.Unmarshaler. func (e *Command) UnmarshalJSON(b []byte) error { if len(b) == 0 { return nil @@ -79,12 +98,17 @@ func (e *Command) UnmarshalJSON(b []byte) error { p := make([]string, 0, 1) if err := json.Unmarshal(b, &p); err != nil { - p = append(p, string(b)) + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) } e.parts = p return nil } +// Len returns the number of parts of the Command. func (e *Command) Len() int { if e == nil { return 0 @@ -92,6 +116,7 @@ func (e *Command) Len() int { return len(e.parts) } +// Slice gets the parts of the Command as a slice of strings. func (e *Command) Slice() []string { if e == nil { return nil @@ -99,49 +124,76 @@ func (e *Command) Slice() []string { return e.parts } +// NewCommand creates a Command based on the specified parts (as strings). func NewCommand(parts ...string) *Command { return &Command{parts} } -// Note: the Config structure should hold only portable information about the container. +// Config contains the configuration data about a container. +// It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. type Config struct { - Hostname string - Domainname string - User string - AttachStdin bool - AttachStdout bool - AttachStderr bool - PortSpecs []string // Deprecated - Can be in the format of 8080/tcp - ExposedPorts map[nat.Port]struct{} - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string - Cmd *Command - Image string // Name of the image as it was passed by the operator (eg.
could be symbolic) - Volumes map[string]struct{} - VolumeDriver string - WorkingDir string - Entrypoint *Entrypoint - NetworkDisabled bool - MacAddress string - OnBuild []string - Labels map[string]string + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts map[nat.Port]struct{} // List of exposed ports + PublishService string // Name of the network service exposed by the container + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd *Command // Command to run when starting the container + Image string // Name of the image as it was passed by the operator (eg. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + VolumeDriver string // Name of the volume driver used to mount volumes + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint *Entrypoint // Entrypoint to run when starting the container + NetworkDisabled bool // Is network disabled + MacAddress string // MAC address of the container + OnBuild []string // ONBUILD metadata that were defined in the image's Dockerfile + Labels map[string]string // List of labels set on this container } +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). type ContainerConfigWrapper struct { *Config - *hostConfigWrapper + InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + *HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. + } -func (c ContainerConfigWrapper) HostConfig() *HostConfig { - if c.hostConfigWrapper == nil { - return new(HostConfig) +// GetHostConfig gets the HostConfig of the Config.
+// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) GetHostConfig() *HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + + hc = w.InnerHostConfig } - return c.hostConfigWrapper.GetHostConfig() + if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + + return hc } // DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper @@ -156,5 +208,5 @@ func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) { return nil, nil, err } - return w.Config, w.HostConfig(), nil + return w.Config, w.GetHostConfig(), nil } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go index 27727a49..9efe1dff 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go @@ -2,265 +2,112 @@ package runconfig import ( "bytes" + "encoding/json" "fmt" "io/ioutil" - "strings" "testing" - - "github.com/docker/docker/nat" ) -func parse(t *testing.T, args string) (*Config, *HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*Config, *HostConfig) { - config, hostConfig, err := parse(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return fmt.Errorf("strings don't match") -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) +func TestEntrypointMarshalJSON(t *testing.T) { + entrypoints := map[*Entrypoint]string{ + nil: "", + &Entrypoint{}: "null", + &Entrypoint{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`, } - if _, _, err := parse(t, "-a"); err == nil { - t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid"); err == nil { - t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdin -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-d --rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/var"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) - } - - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. 
Received %v", hostConfig.Binds) - } - - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } - - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } - - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:roZ -v /hostVar:/containerVar:rwZ"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:roZ", "/hostVar:/containerVar:rwZ") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:roZ -v /hostVar:/containerVar:rwZ` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } - - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:Z", "/hostVar:/containerVar:z") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } - - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) - } else if len(config.Volumes) != 0 { - t.Fatalf("Error parsing volume flags, without volume, no volume should be present. 
Received %v", config.Volumes) - } - - if _, _, err := parse(t, "-v /"); err == nil { - t.Fatalf("Expected error, but got none") - } - - if _, _, err := parse(t, "-v /:/"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") - } - if _, _, err := parse(t, "-v"); err == nil { - t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp:"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp:ro"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp::"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") - } - if _, _, err := parse(t, "-v :"); err == nil { - t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") - } - if _, _, err := parse(t, "-v ::"); err == nil { - t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") - } -} - -func TestCompare(t *testing.T) { - volumes1 := make(map[string]struct{}) - volumes1["/test1"] = struct{}{} - config1 := Config{ - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumes1, - } - config3 := Config{ - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumes1, - } - volumes2 := make(map[string]struct{}) - volumes2["/test2"] = struct{}{} - config5 := Config{ - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumes2, - } - if Compare(&config1, &config3) { - t.Fatalf("Compare should return false, PortSpecs are different") - } - if Compare(&config1, &config5) { - t.Fatalf("Compare should return false, Volumes are different") - } - if !Compare(&config1, &config1) { - t.Fatalf("Compare should return true") - } -} - -func TestMerge(t *testing.T) { - volumesImage := make(map[string]struct{}) - volumesImage["/test1"] = struct{}{} - volumesImage["/test2"] = struct{}{} - configImage := &Config{ - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumesImage, - } - - volumesUser := make(map[string]struct{}) - volumesUser["/test3"] = struct{}{} - configUser := &Config{ - PortSpecs: []string{"3333:2222", "3333:3333"}, - Env: []string{"VAR2=3", "VAR3=3"}, - Volumes: volumesUser, - } - - if err := Merge(configUser, configImage); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 3 { - t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + for entrypoint, expected := range entrypoints { + data, err := entrypoint.MarshalJSON() + if err != nil { + t.Fatal(err) + } + if string(data) != expected { + t.Fatalf("Expected %v, got %v", expected, string(data)) } } - if len(configUser.Env) != 3 { - t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) +} + +func TestEntrypointUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", 
"echo"}, } - for _, env := range configUser.Env { - if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { - t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + for json, expectedParts := range parts { + entrypoint := &Entrypoint{ + []string{"default", "values"}, + } + if err := entrypoint.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := entrypoint.Slice() + if len(actualParts) != len(expectedParts) { + t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) + } + for index, part := range actualParts { + if part != expectedParts[index] { + t.Fatalf("Expected %v, got %v", expectedParts, actualParts) + break + } } } +} - if len(configUser.Volumes) != 3 { - t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) +func TestCommandToString(t *testing.T) { + commands := map[*Command]string{ + &Command{[]string{""}}: "", + &Command{[]string{"one"}}: "one", + &Command{[]string{"one", "two"}}: "one two", } - for v := range configUser.Volumes { - if v != "/test1" && v != "/test2" && v != "/test3" { - t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + for command, expected := range commands { + toString := command.ToString() + if toString != expected { + t.Fatalf("Expected %v, got %v", expected, toString) } } +} - ports, _, err := nat.ParsePortSpecs([]string{"0000"}) - if err != nil { - t.Error(err) - } - configImage2 := &Config{ - ExposedPorts: ports, +func TestCommandMarshalJSON(t *testing.T) { + commands := map[*Command]string{ + nil: "", + &Command{}: "null", + &Command{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`, } - if err := Merge(configUser, configImage2); err != nil { - t.Error(err) + for command, expected := range commands { + data, err := command.MarshalJSON() + if err != nil { + t.Fatal(err) + } + if string(data) != expected { + t.Fatalf("Expected %v, got %v", expected, string(data)) + } } +} - if len(configUser.ExposedPorts) != 4 { - t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) +func TestCommandUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + for json, expectedParts := range parts { + command := &Command{ + []string{"default", "values"}, + } + if err := command.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := command.Slice() + if len(actualParts) != len(expectedParts) { + t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) + } + for index, part := range actualParts { + if part != expectedParts[index] { + t.Fatalf("Expected %v, got %v", expectedParts, actualParts) + break + } } } } @@ -299,3 +146,83 @@ func TestDecodeContainerConfig(t *testing.T) { } } } + +func TestEntrypointUnmarshalString(t *testing.T) { + var e *Entrypoint + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", 
slice[0]) + } +} + +func TestEntrypointUnmarshalSlice(t *testing.T) { + var e *Entrypoint + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} + +func TestCommandUnmarshalSlice(t *testing.T) { + var e *Command + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} + +func TestCommandUnmarshalString(t *testing.T) { + var e *Command + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go index 781cb355..1f13d7dd 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go @@ -4,18 +4,24 @@ import ( flag "github.com/docker/docker/pkg/mflag" ) +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. type ExecConfig struct { - User string - Privileged bool - Tty bool - Container string - AttachStdin bool - AttachStderr bool - AttachStdout bool - Detach bool - Cmd []string + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + Container string // Name of the container (to execute in) + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + Cmd []string // Execution commands and args } +// ParseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +// If the wrong number of args is specified or if the specified args are +// not valid, it returns an error.
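// Illustrative sketch (annotation, not part of the upstream patch), mirroring
// the exec_test.go cases added later in this patch:
//
//	cmd := flag.NewFlagSet("exec", flag.ContinueOnError)
//	cfg, err := ParseExec(cmd, []string{"-i", "-t", "mycontainer", "sh"})
//	// err == nil; cfg.AttachStdin, cfg.AttachStdout, cfg.AttachStderr and
//	// cfg.Tty are true; cfg.Container == "mycontainer"; cfg.Cmd == []string{"sh"}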
func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { var ( flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec_test.go new file mode 100644 index 00000000..a4b7ea9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec_test.go @@ -0,0 +1,129 @@ +package runconfig + +import ( + "fmt" + "io/ioutil" + "testing" + + flag "github.com/docker/docker/pkg/mflag" +) + +type arguments struct { + args []string +} + +func TestParseExec(t *testing.T) { + invalids := map[*arguments]error{ + &arguments{[]string{"-unknown"}}: fmt.Errorf("flag provided but not defined: -unknown"), + &arguments{[]string{"-u"}}: fmt.Errorf("flag needs an argument: -u"), + &arguments{[]string{"--user"}}: fmt.Errorf("flag needs an argument: --user"), + } + valids := map[*arguments]*ExecConfig{ + &arguments{ + []string{"container", "command"}, + }: { + Container: "container", + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + []string{"container", "command1", "command2"}, + }: { + Container: "container", + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + []string{"-i", "-t", "-u", "uid", "container", "command"}, + }: { + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Container: "container", + Cmd: []string{"command"}, + }, + &arguments{ + []string{"-d", "container", "command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Container: "container", + Cmd: []string{"command"}, + }, + &arguments{ + []string{"-t", "-i", "-d", "container", "command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Tty: true, + Container: "container", + Cmd: []string{"command"}, + }, + } + for invalid, expectedError := range invalids { + cmd := flag.NewFlagSet("exec", flag.ContinueOnError) + cmd.ShortUsage = func() {} + cmd.SetOutput(ioutil.Discard) + _, err := ParseExec(cmd, invalid.args) + if err == nil || err.Error() != expectedError.Error() { + t.Fatalf("Expected an error [%v] for %v, got %v", expectedError, invalid, err) + } + + } + for valid, expectedExecConfig := range valids { + cmd := flag.NewFlagSet("exec", flag.ContinueOnError) + cmd.ShortUsage = func() {} + cmd.SetOutput(ioutil.Discard) + execConfig, err := ParseExec(cmd, valid.args) + if err != nil { + t.Fatal(err) + } + if !compareExecConfig(expectedExecConfig, execConfig) { + t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) + } + } +} + +func compareExecConfig(config1 *ExecConfig, config2 *ExecConfig) bool { + if config1.AttachStderr != config2.AttachStderr { + return false + } + if config1.AttachStdin != config2.AttachStdin { + return false + } + if config1.AttachStdout != config2.AttachStdout { + return false + } + if config1.Container != config2.Container { + return false + } + if config1.Detach != config2.Detach { + return false + } + if config1.Privileged != config2.Privileged { + return false + } + if config1.Tty != config2.Tty { + return false + } + if config1.User != config2.User { + return false + } + if len(config1.Cmd) != len(config2.Cmd) { + return false + } + for index, value := range config1.Cmd { + if value != config2.Cmd[index] { + return false + } + } + return true +} diff 
--git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_14.json b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_14.json new file mode 100644 index 00000000..c72ac91c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_19.json b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_19.json new file mode 100644 index 00000000..5ca8aa7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.env b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.env new file mode 100644 index 00000000..3afbdc81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.env @@ -0,0 +1 @@ +ENV1=value1 diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.label b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.label new file mode 100644 index 00000000..b4208bdf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/valid.label @@ -0,0 +1 @@ +LABEL1=value1 diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go index 1418dea4..f6a89a31 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go @@ -5,55 +5,39 @@ import ( "io" "strings" - "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/ulimit" ) +// KeyValuePair is a structure that holds a value for a key. type KeyValuePair struct { Key string Value string } +// NetworkMode represents the container network stack.
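// Illustrative sketch (annotation, not part of the upstream patch): a
// NetworkMode is a plain string whose shape encodes the mode, e.g. "bridge",
// "host", "none", or "container:<name>" to share another container's network
// namespace. The predicate methods removed from this file by the hunk below
// (and defined elsewhere in the vendored tree) just inspect that shape:
//
//	m := NetworkMode("container:db")
//	parts := strings.SplitN(string(m), ":", 2) // ["container", "db"]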
type NetworkMode string -// IsPrivate indicates whether container use it's private network stack -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -func (n NetworkMode) IsNone() bool { - return n == "none" -} - +// IpcMode represents the container ipc stack. type IpcMode string -// IsPrivate indicates whether container use it's private ipc stack +// IsPrivate indicates whether the container uses its private ipc stack. func (n IpcMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } +// IsHost indicates whether the container uses the host's ipc stack. func (n IpcMode) IsHost() bool { return n == "host" } +// IsContainer indicates whether the container uses a container's ipc stack. func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } +// Valid indicates whether the ipc stack is valid. func (n IpcMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { @@ -68,6 +52,7 @@ func (n IpcMode) Valid() bool { return true } +// Container returns the name of the container whose ipc stack is going to be used. func (n IpcMode) Container() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { @@ -76,17 +61,20 @@ return parts[1] } return "" } +// UTSMode represents the UTS namespace of the container. type UTSMode string -// IsPrivate indicates whether container use it's private UTS namespace +// IsPrivate indicates whether the container uses its private UTS namespace. func (n UTSMode) IsPrivate() bool { return !(n.IsHost()) } +// IsHost indicates whether the container uses the host's UTS namespace. func (n UTSMode) IsHost() bool { return n == "host" } +// Valid indicates whether the UTS namespace is valid. func (n UTSMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { @@ -97,17 +85,20 @@ func (n UTSMode) Valid() bool { return true } +// PidMode represents the pid stack of the container. type PidMode string -// IsPrivate indicates whether container use it's private pid stack +// IsPrivate indicates whether the container uses its private pid stack. func (n PidMode) IsPrivate() bool { return !(n.IsHost()) } +// IsHost indicates whether the container uses the host's pid stack. func (n PidMode) IsHost() bool { return n == "host" } +// Valid indicates whether the pid stack is valid. func (n PidMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { @@ -118,38 +109,49 @@ func (n PidMode) Valid() bool { return true } +// DeviceMapping represents the device mapping between the host and the container. type DeviceMapping struct { PathOnHost string PathInContainer string CgroupPermissions string } +// RestartPolicy represents the restart policies of the container. type RestartPolicy struct { Name string MaximumRetryCount int } +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { return rp.Name == "no" } +// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status. func (rp *RestartPolicy) IsAlways() bool { return rp.Name == "always" } +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. func (rp *RestartPolicy) IsOnFailure() bool { return rp.Name == "on-failure" } +// LogConfig represents the logging configuration of the container. type LogConfig struct { Type string Config map[string]string } +// LxcConfig represents the specific LXC configuration of the container. type LxcConfig struct { values []KeyValuePair } +// MarshalJSON marshals (or serializes) the LxcConfig into JSON. func (c *LxcConfig) MarshalJSON() ([]byte, error) { if c == nil { return []byte{}, nil @@ -157,6 +159,8 @@ func (c *LxcConfig) MarshalJSON() ([]byte, error) { return json.Marshal(c.Slice()) } +// UnmarshalJSON unmarshals (or deserializes) the specified byte slice from JSON to +// a LxcConfig. func (c *LxcConfig) UnmarshalJSON(b []byte) error { if len(b) == 0 { return nil @@ -177,6 +181,7 @@ func (c *LxcConfig) UnmarshalJSON(b []byte) error { return nil } +// Len returns the number of lxc configuration entries. func (c *LxcConfig) Len() int { if c == nil { return 0 @@ -184,6 +189,7 @@ func (c *LxcConfig) Len() int { return len(c.values) } +// Slice returns the specific lxc configuration as a slice of KeyValuePair. func (c *LxcConfig) Slice() []KeyValuePair { if c == nil { return nil @@ -191,90 +197,123 @@ func (c *LxcConfig) Slice() []KeyValuePair { return c.values } +// NewLxcConfig creates a LxcConfig from the specified slice of KeyValuePair. func NewLxcConfig(values []KeyValuePair) *LxcConfig { return &LxcConfig{values} } -type HostConfig struct { - Binds []string - ContainerIDFile string - LxcConf *LxcConfig - Memory int64 // Memory limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap - CpuShares int64 // CPU shares (relative weight vs. other containers) - CpuPeriod int64 - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - CpuQuota int64 - BlkioWeight int64 // Block IO weight (relative weight vs. other containers) - OomKillDisable bool // Whether to disable OOM Killer or not - Privileged bool - PortBindings nat.PortMap - Links []string - PublishAllPorts bool - Dns []string - DnsSearch []string - ExtraHosts []string - VolumesFrom []string - Devices []DeviceMapping - NetworkMode NetworkMode - IpcMode IpcMode - PidMode PidMode - UTSMode UTSMode - CapAdd []string - CapDrop []string - RestartPolicy RestartPolicy - SecurityOpt []string - ReadonlyRootfs bool - Ulimits []*ulimit.Ulimit - LogConfig LogConfig - CgroupParent string // Parent cgroup. +// CapList represents the list of capabilities of the container. +type CapList struct { + caps []string } +// MarshalJSON marshals (or serializes) the CapList into JSON. +func (c *CapList) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte{}, nil + } + return json.Marshal(c.Slice()) +} + +// UnmarshalJSON unmarshals (or deserializes) the specified byte slice +// from JSON to a CapList.
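// Illustrative sketch (annotation, not part of the upstream patch): like
// Entrypoint and Command earlier in this patch, CapList accepts either JSON
// form; a bare string becomes a single capability:
//
//	var c CapList
//	_ = json.Unmarshal([]byte(`["NET_ADMIN","SYS_TIME"]`), &c) // c.Slice() == []string{"NET_ADMIN", "SYS_TIME"}
//	_ = json.Unmarshal([]byte(`"NET_ADMIN"`), &c)              // c.Slice() == []string{"NET_ADMIN"}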
+func (c *CapList) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + var caps []string + if err := json.Unmarshal(b, &caps); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + caps = append(caps, s) + } + c.caps = caps + + return nil +} + +// Len returns the number of specific kernel capabilities. +func (c *CapList) Len() int { + if c == nil { + return 0 + } + return len(c.caps) +} + +// Slice returns the specific capabilities as a slice of strings. +func (c *CapList) Slice() []string { + if c == nil { + return nil + } + return c.caps +} + +// NewCapList creates a CapList from a slice of strings. +func NewCapList(caps []string) *CapList { + return &CapList{caps} +} + +// HostConfig is the non-portable Config structure of a container. +// Here, "non-portable" means "dependent on the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LxcConf *LxcConfig // Additional lxc configuration + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + BlkioWeight int64 // Block IO weight (relative weight vs. other containers) + OomKillDisable bool // Whether to disable OOM Killer or not + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + Privileged bool // Is the container in privileged mode + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + Links []string // List of links (in the name:alias form) + PublishAllPorts bool // Should docker publish all exposed ports for the container + DNS []string `json:"Dns"` // List of DNS servers to use for lookups + DNSSearch []string `json:"DnsSearch"` // List of DNS search domains + ExtraHosts []string // List of extra hosts + VolumesFrom []string // List of volumes to take from other containers + Devices []DeviceMapping // List of devices to map inside the container + NetworkMode NetworkMode // Network namespace to use for the container + IpcMode IpcMode // IPC namespace to use for the container + PidMode PidMode // PID namespace to use for the container + UTSMode UTSMode // UTS namespace to use for the container + CapAdd *CapList // List of kernel capabilities to add to the container + CapDrop *CapList // List of kernel capabilities to remove from the container + GroupAdd []string // List of additional groups that the container process will run as + RestartPolicy RestartPolicy // Restart policy to be used for the container + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + ReadonlyRootfs bool // Is the container root filesystem read-only + Ulimits []*ulimit.Ulimit // List of ulimits to be set in the container + LogConfig LogConfig // Configuration of the logs for this container + CgroupParent string // Parent cgroup. + ConsoleSize [2]int // Initial console size on Windows +} + +// MergeConfigs merges the specified container Config and HostConfig. +// It creates a ContainerConfigWrapper.
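// Illustrative sketch (annotation, not part of the upstream patch): the
// wrapper machinery above exists so that both the modern nested payload and
// the deprecated flat payload decode to the same HostConfig. This assumes
// DecodeHostConfig below returns w.GetHostConfig(); the tail of that function
// falls outside this hunk:
//
//	flat := strings.NewReader(`{"Memory": 4194304, "Cpuset": "0,1"}`)
//	hc, err := DecodeHostConfig(flat)
//	// err == nil; hc.Memory == 4194304 and hc.CpusetCpus == "0,1", just as if
//	// {"HostConfig": {"Memory": 4194304, "CpusetCpus": "0,1"}} had been sent.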
func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { return &ContainerConfigWrapper{ config, - &hostConfigWrapper{InnerHostConfig: hostConfig}, + hostConfig, + "", nil, } } -type hostConfigWrapper struct { - InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` - Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - - *HostConfig // Deprecated. Exported to read attrubutes from json that are not in the inner host config structure. -} - -func (w hostConfigWrapper) GetHostConfig() *HostConfig { - hc := w.HostConfig - - if hc == nil && w.InnerHostConfig != nil { - hc = w.InnerHostConfig - } else if w.InnerHostConfig != nil { - if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { - w.InnerHostConfig.Memory = hc.Memory - } - if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { - w.InnerHostConfig.MemorySwap = hc.MemorySwap - } - if hc.CpuShares != 0 && w.InnerHostConfig.CpuShares == 0 { - w.InnerHostConfig.CpuShares = hc.CpuShares - } - - hc = w.InnerHostConfig - } - - if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" { - hc.CpusetCpus = w.Cpuset - } - - return hc -} - +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. func DecodeHostConfig(src io.Reader) (*HostConfig, error) { decoder := json.NewDecoder(src) - var w hostConfigWrapper + var w ContainerConfigWrapper if err := decoder.Decode(&w); err != nil { return nil, err } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_test.go new file mode 100644 index 00000000..7c0befc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_test.go @@ -0,0 +1,303 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "testing" +) + +func TestNetworkModeTest(t *testing.T) { + networkModes := map[NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[NetworkMode]string{ + "": "", + "something:weird": "", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + 
t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + "container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[RestartPolicy][]bool{ + // none, always, failure + RestartPolicy{}: {false, false, false}, + 
RestartPolicy{"something", 0}: {false, false, false}, + RestartPolicy{"no", 0}: {true, false, false}, + RestartPolicy{"always", 0}: {false, true, false}, + RestartPolicy{"on-failure", 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} + +func TestLxcConfigMarshalJSON(t *testing.T) { + lxcConfigs := map[*LxcConfig]string{ + nil: "", + &LxcConfig{}: "null", + &LxcConfig{ + []KeyValuePair{{"key1", "value1"}}, + }: `[{"Key":"key1","Value":"value1"}]`, + } + + for lxcconfig, expected := range lxcConfigs { + data, err := lxcconfig.MarshalJSON() + if err != nil { + t.Fatal(err) + } + if string(data) != expected { + t.Fatalf("Expected %v, got %v", expected, string(data)) + } + } +} + +func TestLxcConfigUnmarshalJSON(t *testing.T) { + keyvaluePairs := map[string][]KeyValuePair{ + "": {{"key1", "value1"}}, + "[]": {}, + `[{"Key":"key2","Value":"value2"}]`: {{"key2", "value2"}}, + } + for json, expectedParts := range keyvaluePairs { + lxcConfig := &LxcConfig{ + []KeyValuePair{{"key1", "value1"}}, + } + if err := lxcConfig.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := lxcConfig.Slice() + if len(actualParts) != len(expectedParts) { + t.Fatalf("Expected %v keyvaluePairs, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) + } + for index, part := range actualParts { + if part != expectedParts[index] { + t.Fatalf("Expected %v, got %v", expectedParts, actualParts) + break + } + } + } +} + +func TestMergeConfigs(t *testing.T) { + expectedHostname := "hostname" + expectedContainerIDFile := "containerIdFile" + config := &Config{ + Hostname: expectedHostname, + } + hostConfig := &HostConfig{ + ContainerIDFile: expectedContainerIDFile, + } + containerConfigWrapper := MergeConfigs(config, hostConfig) + if containerConfigWrapper.Config.Hostname != expectedHostname { + t.Fatalf("containerConfigWrapper config hostname expected %v got %v", expectedHostname, containerConfigWrapper.Config.Hostname) + } + if containerConfigWrapper.InnerHostConfig.ContainerIDFile != expectedContainerIDFile { + t.Fatalf("containerConfigWrapper hostconfig containerIdfile expected %v got %v", expectedContainerIDFile, containerConfigWrapper.InnerHostConfig.ContainerIDFile) + } + if containerConfigWrapper.Cpuset != "" { + t.Fatalf("Expected empty Cpuset, got %v", containerConfigWrapper.Cpuset) + } +} + +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + {"fixtures/container_hostconfig_1_14.json"}, + {"fixtures/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if c.CapAdd.Len() != 1 && 
c.CapAdd.Slice()[0] != "NET_ADMIN" {
+			t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd)
+		}
+
+		if c.CapDrop.Len() != 1 || c.CapDrop.Slice()[0] != "MKNOD" {
+			t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop)
+		}
+	}
+}
+
+func TestCapListUnmarshalSliceAndString(t *testing.T) {
+	var cl *CapList
+	cap0, err := json.Marshal([]string{"CAP_SOMETHING"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(cap0, &cl); err != nil {
+		t.Fatal(err)
+	}
+
+	slice := cl.Slice()
+	if len(slice) != 1 {
+		t.Fatalf("expected 1 element after unmarshal: %q", slice)
+	}
+
+	if slice[0] != "CAP_SOMETHING" {
+		t.Fatalf("expected `CAP_SOMETHING`, got: %q", slice[0])
+	}
+
+	cap1, err := json.Marshal("CAP_SOMETHING")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(cap1, &cl); err != nil {
+		t.Fatal(err)
+	}
+
+	slice = cl.Slice()
+	if len(slice) != 1 {
+		t.Fatalf("expected 1 element after unmarshal: %q", slice)
+	}
+
+	if slice[0] != "CAP_SOMETHING" {
+		t.Fatalf("expected `CAP_SOMETHING`, got: %q", slice[0])
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_unix.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_unix.go
new file mode 100644
index 00000000..5239cb7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package runconfig
+
+import (
+	"strings"
+)
+
+// IsPrivate indicates whether the container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+	return !(n.IsHost() || n.IsContainer())
+}
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+	return n == "default"
+}
+
+// DefaultDaemonNetworkMode returns the default network stack the daemon should
+// use.
+func DefaultDaemonNetworkMode() NetworkMode {
+	return NetworkMode("bridge")
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	if n.IsBridge() {
+		return "bridge"
+	} else if n.IsHost() {
+		return "host"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsDefault() {
+		return "default"
+	}
+	return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack.
+func (n NetworkMode) IsBridge() bool {
+	return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+func (n NetworkMode) IsContainer() bool {
+	parts := strings.SplitN(string(n), ":", 2)
+	return len(parts) > 1 && parts[0] == "container"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+	return n == "none"
+}
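Between the two platform files, a short editorial sketch of how these NetworkMode helpers read in practice (Unix semantics from the file above; assumes package runconfig and "fmt"):

	m := NetworkMode("container:web")
	fmt.Println(m.IsContainer(), m.NetworkName()) // true container
	fmt.Println(NetworkMode("bridge").IsPrivate()) // true: private means neither host nor container

diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_windows.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_windows.go
new file mode 100644
index 00000000..a4c0297b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig_windows.go
@@ -0,0 +1,20 @@
+package runconfig
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+	return n == "default"
+}
+
+// DefaultDaemonNetworkMode returns the default network stack the daemon should
+// use.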
+func DefaultDaemonNetworkMode() NetworkMode { + return NetworkMode("default") +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go index 9c9a3b43..9a020a88 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go @@ -3,10 +3,14 @@ package runconfig import ( "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/nat" ) +// Merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. func Merge(userConf, imageConf *Config) error { if userConf.User == "" { userConf.User = imageConf.User @@ -24,39 +28,6 @@ func Merge(userConf, imageConf *Config) error { } } - if len(userConf.PortSpecs) > 0 { - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(nat.PortSet) - } - ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs) - if err != nil { - return err - } - for port := range ports { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - userConf.PortSpecs = nil - } - if len(imageConf.PortSpecs) > 0 { - // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. - logrus.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", ")) - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(nat.PortSet) - } - - ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs) - if err != nil { - return err - } - for port := range ports { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - if len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { @@ -67,6 +38,7 @@ func Merge(userConf, imageConf *Config) error { userEnvKey := strings.Split(userEnv, "=")[0] if imageEnvKey == userEnvKey { found = true + break } } if !found { @@ -75,16 +47,6 @@ func Merge(userConf, imageConf *Config) error { } } - if userConf.Labels == nil { - userConf.Labels = map[string]string{} - } - if imageConf.Labels != nil { - for l := range userConf.Labels { - imageConf.Labels[l] = userConf.Labels[l] - } - userConf.Labels = imageConf.Labels - } - if userConf.Entrypoint.Len() == 0 { if userConf.Cmd.Len() == 0 { userConf.Cmd = imageConf.Cmd diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge_test.go new file mode 100644 index 00000000..6237ee9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge_test.go @@ -0,0 +1,83 @@ +package runconfig + +import ( + "testing" + + "github.com/docker/docker/pkg/nat" +) + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &Config{ + ExposedPorts: 
portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := Merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &Config{ + ExposedPorts: ports, + } + + if err := Merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go index 46ec2678..5528d7af 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go @@ -5,23 +5,49 @@ import ( "strconv" "strings" - "github.com/docker/docker/nat" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/pkg/units" ) var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior") - ErrConflictNetworkAndDns = fmt.Errorf("Conflicting options: --dns and the network mode (--net)") - ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)") - ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. 
This would result in undefined behavior")
-	ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: --mac-address and the network mode (--net)")
-	ErrConflictNetworkHosts           = fmt.Errorf("Conflicting options: --add-host and the network mode (--net)")
+	// ErrConflictNetworkAndDNS conflict between --dns and the network mode
+	ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: --dns and the network mode (--net)")
+	// ErrConflictNetworkHostname conflict between the hostname and the network mode
+	ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)")
+	// ErrConflictHostNetworkAndLinks conflict between --net=host and links
+	ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior")
+	// ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode
+	ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: --mac-address and the network mode (--net)")
+	// ErrConflictNetworkHosts conflict between --add-host and the network mode
+	ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: --add-host and the network mode (--net)")
+	// ErrConflictNetworkPublishPorts conflict between the publish options and the network mode
+	ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: -p, -P, --publish-all, --publish and the network mode (--net)")
+	// ErrConflictNetworkExposePorts conflict between the expose option and the network mode
+	ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: --expose and the network mode (--net)")
 )
 
+// validateNM is the set of fields passed to validateNetMode()
+type validateNM struct {
+	netMode        NetworkMode
+	flHostname     *string
+	flLinks        opts.ListOpts
+	flDNS          opts.ListOpts
+	flExtraHosts   opts.ListOpts
+	flMacAddress   *string
+	flPublish      opts.ListOpts
+	flPublishAll   *bool
+	flExpose       opts.ListOpts
+	flVolumeDriver string
+}
+
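To make the entry point concrete, an editorial sketch of driving Parse the way the tests further below do (hypothetical values; assumes mflag imported as flag, plus "fmt"):

	cmd := flag.NewFlagSet("run", flag.ContinueOnError)
	config, hostConfig, _, err := Parse(cmd, []string{"--net=host", "--memory=64m", "busybox", "true"})
	if err != nil {
		// handle the parse error
	}
	fmt.Println(hostConfig.NetworkMode.IsHost(), hostConfig.Memory) // true 67108864
	_ = config

+// Parse parses the specified args for the specified command and generates a Config
+// and a HostConfig, and returns them along with the specified command's FlagSet.
+// If the specified args are not valid, it will return an error.
 func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) {
 	var (
 		// FIXME: use utils.ListOpts for attach and volumes?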
@@ -30,21 +56,21 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
 		flLinks   = opts.NewListOpts(opts.ValidateLink)
 		flEnv     = opts.NewListOpts(opts.ValidateEnv)
 		flLabels  = opts.NewListOpts(opts.ValidateEnv)
-		flDevices = opts.NewListOpts(opts.ValidatePath)
+		flDevices = opts.NewListOpts(opts.ValidateDevice)
 
-		ulimits   = make(map[string]*ulimit.Ulimit)
-		flUlimits = opts.NewUlimitOpt(ulimits)
+		flUlimits = opts.NewUlimitOpt(nil)
 
 		flPublish     = opts.NewListOpts(nil)
 		flExpose      = opts.NewListOpts(nil)
-		flDns         = opts.NewListOpts(opts.ValidateIPAddress)
-		flDnsSearch   = opts.NewListOpts(opts.ValidateDnsSearch)
+		flDNS         = opts.NewListOpts(opts.ValidateIPAddress)
+		flDNSSearch   = opts.NewListOpts(opts.ValidateDNSSearch)
 		flExtraHosts  = opts.NewListOpts(opts.ValidateExtraHost)
 		flVolumesFrom = opts.NewListOpts(nil)
 		flLxcOpts     = opts.NewListOpts(nil)
 		flEnvFile     = opts.NewListOpts(nil)
 		flCapAdd      = opts.NewListOpts(nil)
 		flCapDrop     = opts.NewListOpts(nil)
+		flGroupAdd    = opts.NewListOpts(nil)
 		flSecurityOpt = opts.NewListOpts(nil)
 		flLabelsFile  = opts.NewListOpts(nil)
 		flLoggingOpts = opts.NewListOpts(nil)
@@ -64,19 +90,21 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
 		flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
 		flUser       = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])")
 		flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
-		flCpuShares  = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
-		flCpuPeriod  = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period")
+		flCPUShares  = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+		flCPUPeriod  = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period")
+		flCPUQuota   = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota")
 		flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
 		flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
-		flCpuQuota   = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS quota")
 		flBlkioWeight = cmd.Int64([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000")
-		flNetMode     = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container")
+		flSwappiness  = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tuning container memory swappiness (0 to 100)")
+		flNetMode     = cmd.String([]string{"-net"}, "default", "Set the Network mode for the container")
 		flMacAddress  = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") ) cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") @@ -89,13 +117,14 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables") cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host") cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports") - cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") - cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flDNS, []string{"#dns", "-dns"}, "Set custom DNS servers") + cmd.Var(&flDNSSearch, []string{"-dns-search"}, "Set custom DNS search domains") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options") cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") + cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join") cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options") @@ -119,28 +148,20 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) } - if (netMode.IsHost() || netMode.IsContainer()) && *flHostname != "" { - return nil, nil, cmd, ErrConflictNetworkHostname + vals := validateNM{ + netMode: netMode, + flHostname: flHostname, + flLinks: flLinks, + flDNS: flDNS, + flExtraHosts: flExtraHosts, + flMacAddress: flMacAddress, + flPublish: flPublish, + flPublishAll: flPublishAll, + flExpose: flExpose, } - if netMode.IsHost() && flLinks.Len() > 0 { - return nil, nil, cmd, ErrConflictHostNetworkAndLinks - } - - if netMode.IsContainer() && flLinks.Len() > 0 { - return nil, nil, cmd, ErrConflictContainerNetworkAndLinks - } - - if (netMode.IsHost() || netMode.IsContainer()) && flDns.Len() > 0 { - return nil, nil, cmd, ErrConflictNetworkAndDns - } - - if (netMode.IsContainer() || netMode.IsHost()) && flExtraHosts.Len() > 0 { - return nil, nil, cmd, ErrConflictNetworkHosts - } - - if (netMode.IsContainer() || netMode.IsHost()) && *flMacAddress != "" { - return nil, nil, cmd, ErrConflictContainerNetworkAndMac + if err := validateNetMode(&vals); err != nil { + return nil, nil, cmd, err } // Validate the input mac address @@ -149,14 +170,13 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) } } - - // If neither -d or -a are 
set, attach to everything by default + if *flStdin { + attachStdin = true + } + // If -a is not set attach to the output stdio if flAttach.Len() == 0 { attachStdout = true attachStderr = true - if *flStdin { - attachStdin = true - } } var flMemory int64 @@ -181,6 +201,11 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe } } + swappiness := *flSwappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, nil, cmd, fmt.Errorf("Invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + var binds []string // add any bind targets to the list of container volumes for bind := range flVolumes.GetMap() { @@ -245,7 +270,10 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err) } for i := start; i <= end; i++ { - p := nat.NewPort(proto, strconv.FormatUint(i, 10)) + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, nil, cmd, err + } if _, exists := ports[p]; !exists { ports[p] = struct{}{} } @@ -302,7 +330,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe config := &Config{ Hostname: hostname, Domainname: domainname, - PortSpecs: nil, // Deprecated ExposedPorts: ports, User: *flUser, Tty: *flTty, @@ -319,42 +346,45 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe Entrypoint: entrypoint, WorkingDir: *flWorkingDir, Labels: convertKVStringsToMap(labels), + VolumeDriver: *flVolumeDriver, } hostConfig := &HostConfig{ - Binds: binds, - ContainerIDFile: *flContainerIDFile, - LxcConf: lxcConf, - Memory: flMemory, - MemorySwap: MemorySwap, - CpuShares: *flCpuShares, - CpuPeriod: *flCpuPeriod, - CpusetCpus: *flCpusetCpus, - CpusetMems: *flCpusetMems, - CpuQuota: *flCpuQuota, - BlkioWeight: *flBlkioWeight, - OomKillDisable: *flOomKillDisable, - Privileged: *flPrivileged, - PortBindings: portBindings, - Links: flLinks.GetAll(), - PublishAllPorts: *flPublishAll, - Dns: flDns.GetAll(), - DnsSearch: flDnsSearch.GetAll(), - ExtraHosts: flExtraHosts.GetAll(), - VolumesFrom: flVolumesFrom.GetAll(), - NetworkMode: netMode, - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - Devices: deviceMappings, - CapAdd: flCapAdd.GetAll(), - CapDrop: flCapDrop.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: flSecurityOpt.GetAll(), - ReadonlyRootfs: *flReadonlyRootfs, - Ulimits: flUlimits.GetList(), - LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, - CgroupParent: *flCgroupParent, + Binds: binds, + ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, + Memory: flMemory, + MemorySwap: MemorySwap, + CPUShares: *flCPUShares, + CPUPeriod: *flCPUPeriod, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUQuota: *flCPUQuota, + BlkioWeight: *flBlkioWeight, + OomKillDisable: *flOomKillDisable, + MemorySwappiness: flSwappiness, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + DNS: flDNS.GetAll(), + DNSSearch: flDNSSearch.GetAll(), + ExtraHosts: flExtraHosts.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: netMode, + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + Devices: deviceMappings, + CapAdd: NewCapList(flCapAdd.GetAll()), + CapDrop: NewCapList(flCapDrop.GetAll()), + GroupAdd: flGroupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: flSecurityOpt.GetAll(), + 
ReadonlyRootfs: *flReadonlyRootfs, + Ulimits: flUlimits.GetList(), + LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, + CgroupParent: *flCgroupParent, } applyExperimentalFlags(expFlags, config, hostConfig) @@ -402,7 +432,6 @@ func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]st if loggingDriver == "none" && len(loggingOpts) > 0 { return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver) } - //TODO - validation step return loggingOptsMap, nil } @@ -422,12 +451,15 @@ func ParseRestartPolicy(policy string) (RestartPolicy, error) { p.Name = name switch name { case "always": - if len(parts) == 2 { + if len(parts) > 1 { return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"") } case "no": // do nothing case "on-failure": + if len(parts) > 2 { + return p, fmt.Errorf("restart count format is not valid, usage: 'on-failure:N' or 'on-failure'") + } if len(parts) == 2 { count, err := strconv.Atoi(parts[1]) if err != nil { @@ -443,25 +475,6 @@ func ParseRestartPolicy(policy string) (RestartPolicy, error) { return p, nil } -// options will come in the format of name.key=value or name.option -func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { - out := make(map[string][]string, len(opts.GetAll())) - for _, o := range opts.GetAll() { - parts := strings.SplitN(o, ".", 2) - if len(parts) < 2 { - return nil, fmt.Errorf("invalid opt format %s", o) - } else if strings.TrimSpace(parts[0]) == "" { - return nil, fmt.Errorf("key cannot be empty %s", o) - } - values, exists := out[parts[0]] - if !exists { - values = []string{} - } - out[parts[0]] = append(values, parts[1]) - } - return out, nil -} - func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) { out := make([]KeyValuePair, opts.Len()) for i, o := range opts.GetAll() { @@ -474,20 +487,7 @@ func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) { return out, nil } -func parseNetMode(netMode string) (NetworkMode, error) { - parts := strings.Split(netMode, ":") - switch mode := parts[0]; mode { - case "bridge", "none", "host": - case "container": - if len(parts) < 2 || parts[1] == "" { - return "", fmt.Errorf("invalid container format container:") - } - default: - return "", fmt.Errorf("invalid --net: %s", netMode) - } - return NetworkMode(netMode), nil -} - +// ParseDevice parses a device mapping string to a DeviceMapping struct func ParseDevice(device string) (DeviceMapping, error) { src := "" dst := "" diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_experimental.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_experimental.go index 886b377f..8f8612ba 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_experimental.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_experimental.go @@ -10,10 +10,10 @@ type experimentalFlags struct { func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags { flags := make(map[string]interface{}) - flags["volume-driver"] = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") + flags["publish-service"] = cmd.String([]string{"-publish-service"}, "", "Publish this container as a service") return &experimentalFlags{flags: flags} } func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) { - config.VolumeDriver = *(exp.flags["volume-driver"]).(*string) + config.PublishService = 
*(exp.flags["publish-service"]).(*string) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go index 6c0a1cfc..8916e7d4 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go @@ -1,10 +1,13 @@ package runconfig import ( + "fmt" "io/ioutil" + "strings" "testing" flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/parsers" ) @@ -15,6 +18,163 @@ func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { return Parse(cmd, args) } +func parse(t *testing.T, args string) (*Config, *HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*Config, *HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } else if _, exists := config.Volumes["/var"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. 
Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /containerTmp:ro -v /containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/containerTmp:ro", "/containerVar:rw") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro,Z -v /hostVar:/containerVar:rw,Z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro,Z", "/hostVar:/containerVar:rw,Z") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro,Z -v /hostVar:/containerVar:rw,Z` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:Z", "/hostVar:/containerVar:z") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) + } else if len(config.Volumes) != 0 { + t.Fatalf("Error parsing volume flags, without volume, no volume should be present. 
Received %v", config.Volumes) + } + + if _, _, err := parse(t, "-v /"); err == nil { + t.Fatalf("Expected error, but got none") + } + + if _, _, err := parse(t, "-v /:/"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + } + if _, _, err := parse(t, "-v"); err == nil { + t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + } + if _, _, err := parse(t, "-v :"); err == nil { + t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + } + if _, _, err := parse(t, "-v ::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + } +} + func TestParseLxcConfOpt(t *testing.T) { opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} @@ -30,6 +190,18 @@ func TestParseLxcConfOpt(t *testing.T) { t.Fail() } } + + // With parseRun too + _, hostconfig, _, err := parseRun([]string{"lxc.utsname=docker", "lxc.utsname = docker ", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + for _, lxcConf := range hostconfig.LxcConf.Slice() { + if lxcConf.Key != "lxc.utsname" || lxcConf.Value != "docker" { + t.Fail() + } + } + } func TestNetHostname(t *testing.T) { @@ -56,10 +228,331 @@ func TestNetHostname(t *testing.T) { if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } + if _, _, _, err := parseRun([]string{"--net=container", "img", "cmd"}); err == nil || err.Error() != "--net: invalid net mode: invalid container format container:" { + t.Fatalf("Expected error with --net=container, got : %v", err) + } + if _, _, _, err := parseRun([]string{"--net=weird", "img", "cmd"}); err == nil || err.Error() != "--net: invalid net mode: invalid --net: weird" { + t.Fatalf("Expected error with --net=weird, got: %s", err) + } } func TestConflictContainerNetworkAndLinks(t *testing.T) { if _, _, _, err := parseRun([]string{"--net=container:other", "--link=zip:zap", "img", "cmd"}); err != ErrConflictContainerNetworkAndLinks { t.Fatalf("Expected error ErrConflictContainerNetworkAndLinks, got: %s", err) } + if _, _, _, err := parseRun([]string{"--net=host", "--link=zip:zap", "img", "cmd"}); err != ErrConflictHostNetworkAndLinks { + t.Fatalf("Expected error ErrConflictHostNetworkAndLinks, got: %s", err) + } +} + +func TestConflictNetworkModeAndOptions(t *testing.T) { + if _, _, _, err := parseRun([]string{"--net=host", "--dns=8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkAndDNS { + t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container:other", "--dns=8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkAndDNS { + t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=host", "--add-host=name:8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkHosts { + t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container:other", "--add-host=name:8.8.8.8", "img", "cmd"}); err != 
ErrConflictNetworkHosts { + t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=host", "--mac-address=92:d0:c6:0a:29:33", "img", "cmd"}); err != ErrConflictContainerNetworkAndMac { + t.Fatalf("Expected error ErrConflictContainerNetworkAndMac, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "img", "cmd"}); err != ErrConflictContainerNetworkAndMac { + t.Fatalf("Expected error ErrConflictContainerNetworkAndMac, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container:other", "-P", "img", "cmd"}); err != ErrConflictNetworkPublishPorts { + t.Fatalf("Expected error ErrConflictNetworkPublishPorts, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container:other", "-p", "8080", "img", "cmd"}); err != ErrConflictNetworkPublishPorts { + t.Fatalf("Expected error ErrConflictNetworkPublishPorts, got %s", err) + } + if _, _, _, err := parseRun([]string{"--net=container:other", "--expose", "8000-9000", "img", "cmd"}); err != ErrConflictNetworkExposePorts { + t.Fatalf("Expected error ErrConflictNetworkExposePorts, got %s", err) + } +} + +// Simple parse with MacAddress validatation +func TestParseWithMacAddress(t *testing.T) { + invalidMacAddress := "--mac-address=invalidMacAddress" + validMacAddress := "--mac-address=92:d0:c6:0a:29:33" + if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { + t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) + } + if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { + t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) + } +} + +func TestParseWithMemory(t *testing.T) { + invalidMemory := "--memory=invalid" + validMemory := "--memory=1G" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { + t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) + } +} + +func TestParseWithMemorySwap(t *testing.T) { + invalidMemory := "--memory-swap=invalid" + validMemory := "--memory-swap=1G" + anotherValidMemory := "--memory-swap=-1" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { + t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } + if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { + t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } +} + +func TestParseHostname(t *testing.T) { + hostname := "--hostname=hostname" + hostnameWithDomain := "--hostname=hostname.domainname" + hostnameWithDomainTld := "--hostname=hostname.domainname.tld" + if config, _ := mustParse(t, hostname); config.Hostname != "hostname" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } + if config, _ := mustParse(t, 
hostnameWithDomain); config.Hostname != "hostname" && config.Domainname != "domainname" { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } + if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname" && config.Domainname != "domainname.tld" { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } +} + +func TestParseWithExpose(t *testing.T) { + invalids := map[string]string{ + ":": "Invalid port format for --expose: :", + "8080:9090": "Invalid port format for --expose: 8080:9090", + "/tcp": "Invalid range format for --expose: /tcp, error: Empty string specified for ports.", + "/udp": "Invalid range format for --expose: /udp, error: Empty string specified for ports.", + "NaN/tcp": `Invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "NaN-NaN/tcp": `Invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "8080-NaN/tcp": `Invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "1234567890-8080/tcp": `Invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, + } + valids := map[string][]nat.Port{ + "8080/tcp": {"8080/tcp"}, + "8080/udp": {"8080/udp"}, + "8080/ncp": {"8080/ncp"}, + "8080-8080/udp": {"8080/udp"}, + "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, + } + for expose, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) + } + } + for expose, exposedPorts := range valids { + config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != len(exposedPorts) { + t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) + } + for _, port := range exposedPorts { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) + } + } + } + // Merge with actual published port + config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != 2 { + t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) + } + ports := []nat.Port{"80/tcp", "81/tcp"} + for _, port := range ports { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + valids := map[string]DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:ro": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "ro", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", 
deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseModes(t *testing.T) { + // ipc ko + if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { + t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) + } + // ipc ok + _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.IpcMode.Valid() { + t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) + } + // pid ko + if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { + t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) + } + // pid ok + _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + // uts ko + if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { + t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) + } + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "something": "invalid restart policy something", + "always:2": "maximum restart count not valid with restart policy of \"always\"", + "always:2:3": "maximum restart count not valid with restart policy of \"always\"", + "on-failure:invalid": `strconv.ParseInt: parsing "invalid": invalid syntax`, + "on-failure:2:5": "restart count format is not valid, usage: 'on-failure:N' or 'on-failure'", + } + valids := map[string]RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func TestParseLoggingOpts(t *testing.T) { + // logging opts ko + if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "Invalid logging opts for driver none" { + t.Fatalf("Expected an error with message 'Invalid logging opts for driver none', got %v", err) + } + // logging opts ok + _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { + t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) + } +} + +func TestParseEnvfileVariables(t *testing.T) { + // env ko + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); 
err == nil || err.Error() != "open nonexistent: no such file or directory" {
+		t.Fatalf("Expected an error with message 'open nonexistent: no such file or directory', got %v", err)
+	}
+	// env ok
+	config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" {
+		t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env)
+	}
+	config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" {
+		t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env)
+	}
+}
+
+func TestParseLabelfileVariables(t *testing.T) {
+	// label ko
+	if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != "open nonexistent: no such file or directory" {
+		t.Fatalf("Expected an error with message 'open nonexistent: no such file or directory', got %v", err)
+	}
+	// label ok
+	config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" {
+		t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels)
+	}
+	config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" {
+		t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels)
+	}
+}
+
+func TestParseEntryPoint(t *testing.T) {
+	config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if config.Entrypoint.Len() != 1 || config.Entrypoint.parts[0] != "anything" {
+		t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint)
+	}
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_unix.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_unix.go
new file mode 100644
index 00000000..7086b1ad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_unix.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package runconfig
+
+import (
+	"fmt"
+	"strings"
+)
+
+func parseNetMode(netMode string) (NetworkMode, error) {
+	parts := strings.Split(netMode, ":")
+	switch mode := parts[0]; mode {
+	case "default", "bridge", "none", "host":
+	case "container":
+		if len(parts) < 2 || parts[1] == "" {
+			return "", fmt.Errorf("invalid container format container:")
+		}
+	default:
+		return "", fmt.Errorf("invalid --net: %s", netMode)
+	}
+	return NetworkMode(netMode), nil
+}
+
+func validateNetMode(vals *validateNM) error {
+
+	if (vals.netMode.IsHost() || vals.netMode.IsContainer()) && *vals.flHostname != "" {
+		return ErrConflictNetworkHostname
+	}
+
+	if vals.netMode.IsHost() && vals.flLinks.Len() > 0 {
+		return ErrConflictHostNetworkAndLinks
+	}
+
+	if vals.netMode.IsContainer() && vals.flLinks.Len() > 0 {
+		return ErrConflictContainerNetworkAndLinks
+	}
+
+	if (vals.netMode.IsHost() || vals.netMode.IsContainer()) && vals.flDNS.Len() > 0 {
+		return ErrConflictNetworkAndDNS
+	}
+
+	if (vals.netMode.IsContainer() || vals.netMode.IsHost()) && vals.flExtraHosts.Len() > 0 {
+		return ErrConflictNetworkHosts
+	}
+
+ if (vals.netMode.IsContainer() || vals.netMode.IsHost()) && *vals.flMacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if vals.netMode.IsContainer() && (vals.flPublish.Len() > 0 || *vals.flPublishAll == true) { + return ErrConflictNetworkPublishPorts + } + + if vals.netMode.IsContainer() && vals.flExpose.Len() > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_windows.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_windows.go new file mode 100644 index 00000000..ca0a2e6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_windows.go @@ -0,0 +1,20 @@ +package runconfig + +import ( + "fmt" + "strings" +) + +func parseNetMode(netMode string) (NetworkMode, error) { + parts := strings.Split(netMode, ":") + switch mode := parts[0]; mode { + case "default", "none": + default: + return "", fmt.Errorf("invalid --net: %s", netMode) + } + return NetworkMode(netMode), nil +} + +func validateNetMode(vals *validateNM) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/experimental.go b/Godeps/_workspace/src/github.com/docker/docker/utils/experimental.go new file mode 100644 index 00000000..ceed0cb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/utils/experimental.go @@ -0,0 +1,9 @@ +// +build experimental + +package utils + +// ExperimentalBuild is a stub which always returns true for +// builds that include the "experimental" build tag +func ExperimentalBuild() bool { + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/git.go b/Godeps/_workspace/src/github.com/docker/docker/utils/git.go new file mode 100644 index 00000000..4d0bb164 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/utils/git.go @@ -0,0 +1,100 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" +) + +// GitClone clones a repository into a newly created directory which +// will be under "docker-build-git" +func GitClone(remoteURL string) (string, error) { + if !urlutil.IsGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + u, err := url.Parse(remoteURL) + if err != nil { + return "", err + } + + fragment := u.Fragment + clone := cloneArgs(u, root) + + if output, err := git(clone...); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + + return checkoutGit(fragment, root) +} + +func cloneArgs(remoteURL *url.URL, root string) []string { + args := []string{"clone", "--recursive"} + shallow := len(remoteURL.Fragment) == 0 + + if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + if remoteURL.Fragment != "" { + remoteURL.Fragment = "" + } + + return append(args, remoteURL.String(), root) +} + +func checkoutGit(fragment, root string) (string, error) { + refAndDir := strings.SplitN(fragment, ":", 2) + + if len(refAndDir[0]) != 0 { + if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err 
!= nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) + if err != nil { + return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/git_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/git_test.go new file mode 100644 index 00000000..e9eb5956 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/utils/git_test.go @@ -0,0 +1,186 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsGit(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsStripFragment(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker#test") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + if err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.name", 
"Docker test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { + t.Fatal(err) + } + + subDir := filepath.Join(gitDir, "subdir") + if err = os.Mkdir(subDir, 0755); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { + t.Fatal(err) + } + + cases := []struct { + frag string + exp string + fail bool + }{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch\nEXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch\nEXPOSE 5000", false}, + {"master:parentlink", "FROM scratch\nEXPOSE 5000", false}, + {"master:absolutelink", "FROM scratch\nEXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch\nEXPOSE 3000", false}, + {"test:", "FROM scratch\nEXPOSE 3000", false}, + {"test:subdir", "FROM busybox\nEXPOSE 5000", false}, + } + + for _, c := range cases { + r, err := checkoutGit(c.frag, gitDir) + + fail := err != nil + if fail != c.fail { + t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) + } + if c.fail { + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + if err != nil { + t.Fatal(err) + } + + if string(b) != c.exp { + t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/stubs.go b/Godeps/_workspace/src/github.com/docker/docker/utils/stubs.go new file mode 100644 index 00000000..8a496d39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/utils/stubs.go @@ -0,0 +1,9 @@ +// +build !experimental + +package utils + +// ExperimentalBuild is a stub which always returns false for +// builds that do not include the "experimental" build tag +func ExperimentalBuild() bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go new file mode 100644 index 00000000..8c98d472 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go @@ -0,0 +1,288 @@ +package utils + +import ( + "bufio" + "crypto/sha1" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + 
"github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/stringid" +) + +// SelfPath figures out the absolute path of our own binary (if it's still around). +func SelfPath() string { + path, err := exec.LookPath(os.Args[0]) + if err != nil { + if os.IsNotExist(err) { + return "" + } + if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { + return "" + } + panic(err) + } + path, err = filepath.Abs(path) + if err != nil { + if os.IsNotExist(err) { + return "" + } + panic(err) + } + return path +} + +func dockerInitSha1(target string) string { + f, err := os.Open(target) + if err != nil { + return "" + } + defer f.Close() + h := sha1.New() + _, err = io.Copy(h, f) + if err != nil { + return "" + } + return hex.EncodeToString(h.Sum(nil)) +} + +func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) + if target == "" { + return false + } + if dockerversion.IAMSTATIC == "true" { + if selfPath == "" { + return false + } + if target == selfPath { + return true + } + targetFileInfo, err := os.Lstat(target) + if err != nil { + return false + } + selfPathFileInfo, err := os.Lstat(selfPath) + if err != nil { + return false + } + return os.SameFile(targetFileInfo, selfPathFileInfo) + } + return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 +} + +// DockerInitPath figures out the path of our dockerinit (which may be SelfPath()) +func DockerInitPath(localCopy string) string { + selfPath := SelfPath() + if isValidDockerInitPath(selfPath, selfPath) { + // if we're valid, don't bother checking anything else + return selfPath + } + var possibleInits = []string{ + localCopy, + dockerversion.INITPATH, + filepath.Join(filepath.Dir(selfPath), "dockerinit"), + + // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." + // https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec + "/usr/libexec/docker/dockerinit", + "/usr/local/libexec/docker/dockerinit", + + // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." + // https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA + "/usr/lib/docker/dockerinit", + "/usr/local/lib/docker/dockerinit", + } + for _, dockerInit := range possibleInits { + if dockerInit == "" { + continue + } + path, err := exec.LookPath(dockerInit) + if err == nil { + path, err = filepath.Abs(path) + if err != nil { + // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? + panic(err) + } + if isValidDockerInitPath(path, selfPath) { + return path + } + } + } + return "" +} + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. 
+func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = stringid.GenerateNonCryptoID()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read; +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hang on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// ReadDockerIgnore reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use Go's "clean" func to get the shortest/cleanest path for each.
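+// For example, a file containing "test1\n/test2\n\nlastfile" yields
+// []string{"test1", "/test2", "lastfile"}: blank lines are skipped and each
+// pattern is passed through filepath.Clean.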
+func ReadDockerIgnore(path string) ([]string, error) { + // Note that a missing .dockerignore file isn't treated as an error + reader, err := os.Open(path) + if err != nil { + if !os.IsNotExist(err) { + return nil, fmt.Errorf("Error reading '%s': %v", path, err) + } + return nil, nil + } + defer reader.Close() + + scanner := bufio.NewScanner(reader) + var excludes []string + + for scanner.Scan() { + pattern := strings.TrimSpace(scanner.Text()) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + excludes = append(excludes, pattern) + } + if err = scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading '%s': %v", path, err) + } + return excludes, nil +} + +// ImageReference combines `repo` and `ref` and returns a string representing +// the combination. If `ref` is a digest (meaning it's of the form +// <algorithm>:<digest>), the returned string is <repo>@<ref>. Otherwise, +// ref is assumed to be a tag, and the returned string is <repo>:<ref>. +func ImageReference(repo, ref string) string { + if DigestReference(ref) { + return repo + "@" + ref + } + return repo + ":" + ref +} + +// DigestReference returns true if ref is a digest reference; i.e. if it +// is of the form <algorithm>:<digest>. +func DigestReference(ref string) bool { + return strings.Contains(ref, ":") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go new file mode 100644 index 00000000..28630094 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go @@ -0,0 +1,100 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/"} + o = []string{"HOME=/root", "TERM=xterm"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} + +func TestImageReference(t *testing.T) { + tests := []struct { + repo string + ref string + expected string + }{ + {"repo", "tag", "repo:tag"}, + {"repo", "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64", "repo@sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"}, + } + + for i, test := range tests { + actual := ImageReference(test.repo, test.ref) + if test.expected != actual { + t.Errorf("%d: expected %q, got %q", i, test.expected, actual) + } + } +} + +func TestDigestReference(t *testing.T) { + input := "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64" + if !DigestReference(input) { + t.Errorf("Expected DigestReference=true for input %q", input) + } + + input = "latest" + if DigestReference(input) { + t.Errorf("Unexpected DigestReference=true for input %q", input) + } +} + +func TestReadDockerIgnore(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + diName := filepath.Join(tmpDir, ".dockerignore") + + di, err := ReadDockerIgnore(diName) + if err != nil { + t.Fatalf("Expected not to have error, got %s", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) + } + + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + di, err =
ReadDockerIgnore(diName) + if err != nil { + t.Fatal(err) + } + + if di[0] != "test1" { + t.Fatalf("First element is not test1") + } + if di[1] != "/test2" { + t.Fatalf("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatalf("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatalf("Fourth element is not lastfile") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/adapter.go b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/adapter.go new file mode 100644 index 00000000..6846a3a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/adapter.go @@ -0,0 +1,60 @@ +package volumedrivers + +import "github.com/docker/docker/volume" + +type volumeDriverAdapter struct { + name string + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) { + err := a.proxy.Create(name) + if err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name}, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + driverName string + eMount string // ephemeral host volume path +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) > 0 { + return a.eMount + } + m, _ := a.proxy.Path(a.name) + return m +} + +func (a *volumeAdapter) Mount() (string, error) { + var err error + a.eMount, err = a.proxy.Mount(a.name) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount() error { + return a.proxy.Unmount(a.name) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/api.go b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/api.go new file mode 100644 index 00000000..ced82e08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/api.go @@ -0,0 +1,25 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type VolumeDriver -name VolumeDriver + +package volumedrivers + +import "github.com/docker/docker/volume" + +// NewVolumeDriver returns a driver that has the given name mapped on the given client. +func NewVolumeDriver(name string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name, proxy} +} + +// VolumeDriver defines the available functions that volume plugins must implement.
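+// Each of these calls is proxied to the plugin as an HTTP request against a
+// /VolumeDriver.<Method> endpoint using the
+// application/vnd.docker.plugins.v1+json content type; proxy.go below holds
+// the generated client side, and proxy_test.go shows the wire format.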
+type VolumeDriver interface { + // Create a volume with the given name + Create(name string) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name string) (err error) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/extpoint.go b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/extpoint.go new file mode 100644 index 00000000..09bb7d43 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/extpoint.go @@ -0,0 +1,67 @@ +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex +} + +// Register associates the given driver to the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + drivers.Lock() + defer drivers.Unlock() + if name == "" { + return false + } + _, exists := drivers.extensions[name] + if exists { + return false + } + drivers.extensions[name] = extension + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// Lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name.
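+// A plugin-backed driver is cached in the extensions map after the first
+// successful Lookup, so plugin discovery runs at most once per name.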
+func Lookup(name string) (volume.Driver, error) { + drivers.Lock() + defer drivers.Unlock() + ext, ok := drivers.extensions[name] + if ok { + return ext, nil + } + pl, err := plugins.Get(name, "VolumeDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + d := NewVolumeDriver(name, pl.Client) + drivers.extensions[name] = d + return d, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy.go b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy.go new file mode 100644 index 00000000..9fd68855 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy.go @@ -0,0 +1,149 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import "errors" + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy_test.go b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy_test.go new file mode 100644 index 00000000..cadf8c0d --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/docker/volume/drivers/proxy_test.go @@ -0,0 +1,96 @@ +package volumedrivers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/tlsconfig" +) + +func TestVolumeRequestError(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot create volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Unknown volume"}`) + }) + + u, _ := url.Parse(server.URL) + client, err := plugins.NewClient("tcp://"+u.Host, tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatal(err) + } + + driver := volumeDriverProxy{client} + + if err = driver.Create("volume"); err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot create volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Mount("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot mount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Unmount("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot unmount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Remove("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot remove volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Path("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Unknown volume") { + t.Fatalf("Unexpected error: %v\n", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/local/local.go b/Godeps/_workspace/src/github.com/docker/docker/volume/local/local.go new file mode 100644 index 00000000..0507f07e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/volume/local/local.go @@ -0,0 +1,202 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. 
+// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var oldVfsDir = filepath.Join("vfs", "dir") + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. +func New(scope string) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := os.MkdirAll(rootDirectory, 0700); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + for _, d := range dirs { + name := filepath.Base(d.Name()) + r.volumes[name] = &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + } + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. +func (r *Root) Create(name string) (volume.Volume, error) { + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if !exists { + path := r.DataPath(name) + if err := os.MkdirAll(path, 0755); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, err + } + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + r.volumes[name] = v + } + v.use() + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver, an error is +// returned. The volume is reference counted; if all references are +// not released then the volume is not removed. +func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + lv, ok := v.(*localVolume) + if !ok { + return errors.New("unknown volume type") + } + lv.release() + if lv.usedCount == 0 { + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + return err + } + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory out of the Docker root: %s", realPath) + } + + if err := os.RemoveAll(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return os.RemoveAll(filepath.Dir(lv.path)) + } + return nil +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root or one of the valid local paths.
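+// Both the current "volumes" layout and the pre-1.7 "vfs/dir" layout are
+// accepted, which is what the two prefix checks below implement.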
+func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. +type localVolume struct { + m sync.Mutex + usedCount int + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount implements the Volume interface, returning the data location. +func (v *localVolume) Mount() (string, error) { + return v.path, nil +} + +// Unmount is for satisfying the Volume interface and does not do anything in this driver. +func (v *localVolume) Unmount() error { + return nil +} + +func (v *localVolume) use() { + v.m.Lock() + v.usedCount++ + v.m.Unlock() +} + +func (v *localVolume) release() { + v.m.Lock() + v.usedCount-- + v.m.Unlock() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/volume/volume.go b/Godeps/_workspace/src/github.com/docker/docker/volume/volume.go new file mode 100644 index 00000000..19c9d77a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/volume/volume.go @@ -0,0 +1,61 @@ +package volume + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName string = "local" + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given id. + Create(string) (Volume, error) + // Remove deletes the volume. + Remove(Volume) error +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount() (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount() error +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "rw,Z": true, + "rw,z": true, + "z,rw": true, + "Z,rw": true, + "Z": true, + "z": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, + "ro,Z": true, + "ro,z": true, + "z,ro": true, + "Z,ro": true, +} + +// ValidateMountMode will make sure the mount mode is valid. +// It returns whether it's a valid mount mode and whether it's read-write. +func ValidateMountMode(mode string) (bool, bool) { + return roModes[mode] || rwModes[mode], rwModes[mode] +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not.
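+// For example, ReadWrite("rw") and ReadWrite("rw,Z") are true, while
+// ReadWrite("ro") is false.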
+func ReadWrite(mode string) bool { + return rwModes[mode] +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/color_logger.go b/Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/color_logger.go new file mode 100644 index 00000000..04c5bcd6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/color_logger.go @@ -0,0 +1,70 @@ +package logger + +import ( + "fmt" + "os" + "strconv" + + "github.com/docker/libcompose/logger" + "golang.org/x/crypto/ssh/terminal" +) + +type ColorLoggerFactory struct { + maxLength int + tty bool +} + +type ColorLogger struct { + name string + colorPrefix string + factory *ColorLoggerFactory +} + +func NewColorLoggerFactory() *ColorLoggerFactory { + return &ColorLoggerFactory{ + tty: terminal.IsTerminal(int(os.Stdout.Fd())), + } +} + +func (c *ColorLoggerFactory) Create(name string) logger.Logger { + if c.maxLength < len(name) { + c.maxLength = len(name) + } + + return &ColorLogger{ + name: name, + factory: c, + colorPrefix: <-colorPrefix, + } +} + +func (c *ColorLogger) Out(bytes []byte) { + if len(bytes) == 0 { + return + } + logFmt, name := c.getLogFmt() + message := fmt.Sprintf(logFmt, name, string(bytes)) + fmt.Print(message) +} + +func (c *ColorLogger) Err(bytes []byte) { + if len(bytes) == 0 { + return + } + logFmt, name := c.getLogFmt() + message := fmt.Sprintf(logFmt, name, string(bytes)) + fmt.Fprint(os.Stderr, message) +} + +func (c *ColorLogger) getLogFmt() (string, string) { + pad := c.factory.maxLength + + logFmt := "%s | %s" + if c.factory.tty { + logFmt = c.colorPrefix + " %s" + } + + name := fmt.Sprintf("%-"+strconv.Itoa(pad)+"s", c.name) + + return logFmt, name +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/colors.go b/Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/colors.go new file mode 100644 index 00000000..21aa46a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/cli/logger/colors.go @@ -0,0 +1,34 @@ +package logger + +import "fmt" + +var ( + colorPrefix chan string = make(chan string) +) + +func generateColors() { + i := 0 + color_order := []string{ + "36", // cyan + "33", // yellow + "32", // green + "35", // magenta + "31", // red + "34", // blue + "36;1", // intense cyan + "33;1", // intense yellow + "32;1", // intense green + "35;1", // intense magenta + "31;1", // intense red + "34;1", // intense blue + } + + for { + colorPrefix <- fmt.Sprintf("\033[%sm%%s |\033[0m", color_order[i]) + i = (i + 1) % len(color_order) + } +} + +func init() { + go generateColors() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/builder.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/builder.go new file mode 100644 index 00000000..0d99a341 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/builder.go @@ -0,0 +1,157 @@ +package docker + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/utils" + "github.com/docker/libcompose/project" + "github.com/samalba/dockerclient" +) + +type Builder interface { + Build(p *project.Project, service project.Service) (string, error) +} + +type DaemonBuilder struct { + context *Context +} + +func NewDaemonBuilder(context *Context) *DaemonBuilder { + return &DaemonBuilder{ + context: context, + } +} + 
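+// Build returns the configured image name unchanged when the service has no
+// build directory; otherwise it tars the build context, has the daemon build
+// it as "<project>_<service>", and streams the build output to stdout. A
+// rough usage sketch (ctx, p and svc are illustrative names, not part of
+// this API):
+//
+//	b := NewDaemonBuilder(ctx)
+//	tag, err := b.Build(p, svc) // tag is the image to run for svc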
+func (d *DaemonBuilder) Build(p *project.Project, service project.Service) (string, error) { + if service.Config().Build == "" { + return service.Config().Image, nil + } + + tag := fmt.Sprintf("%s_%s", p.Name, service.Name()) + context, err := CreateTar(p, service.Name()) + if err != nil { + return "", err + } + + defer context.Close() + + client := d.context.ClientFactory.Create(service) + + logrus.Infof("Building %s...", tag) + output, err := client.BuildImage(&dockerclient.BuildImage{ + Context: context, + RepoName: tag, + Remove: true, + }) + if err != nil { + return "", err + } + + defer output.Close() + + // Don't really care about errors in the scanner + scanner := bufio.NewScanner(output) + for scanner.Scan() { + text := scanner.Text() + data := map[string]interface{}{} + err := json.Unmarshal([]byte(text), &data) + if stream, ok := data["stream"]; ok && err == nil { + fmt.Print(stream) + } + } + + return tag, nil +} + +func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { + // This code was ripped off from docker/api/client/build.go + + serviceConfig := p.Configs[name] + root := serviceConfig.Build + dockerfileName := filepath.Join(root, serviceConfig.Dockerfile) + + absRoot, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + filename := dockerfileName + + if dockerfileName == "" { + // No -f/--file was specified so use the default + dockerfileName = api.DefaultDockerfileName + filename = filepath.Join(absRoot, dockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(filename); os.IsNotExist(err) { + tmpFN := path.Join(absRoot, strings.ToLower(dockerfileName)) + if _, err = os.Lstat(tmpFN); err == nil { + dockerfileName = strings.ToLower(dockerfileName) + filename = tmpFN + } + } + } + + origDockerfile := dockerfileName // used for error msg + if filename, err = filepath.Abs(filename); err != nil { + return nil, err + } + + // Now reset the dockerfileName to be relative to the build context + dockerfileName, err = filepath.Rel(absRoot, filename) + if err != nil { + return nil, err + } + + // And canonicalize dockerfile name to a platform-independent one + dockerfileName, err = archive.CanonicalTarNameForPath(dockerfileName) + if err != nil { + return nil, fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err) + } + + if _, err = os.Lstat(filename); os.IsNotExist(err) { + return nil, fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile) + } + var includes = []string{"."} + + excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore")) + if err != nil { + return nil, err + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(dockerfileName, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", dockerfileName) + } + + if err := utils.ValidateContextDirectory(root, excludes); err != nil { + return nil, fmt.Errorf("Error checking context is accessible: '%s'.
Please check permissions and try again.", err) + } + + options := &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: excludes, + IncludeFiles: includes, + } + + return archive.TarWithOptions(root, options) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/client.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/client.go new file mode 100644 index 00000000..c4ec8cb6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/client.go @@ -0,0 +1,94 @@ +package docker + +import ( + "crypto/tls" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/samalba/dockerclient" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } +} + +type ClientOpts struct { + TLS bool + TLSVerify bool + TLSOptions tlsconfig.Options + TrustKey string + Host string +} + +func CreateClient(c ClientOpts) (dockerclient.Client, error) { + if c.TLSOptions.CAFile == "" { + c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile) + } + if c.TLSOptions.CertFile == "" { + c.TLSOptions.CertFile = filepath.Join(dockerCertPath, defaultCertFile) + } + if c.TLSOptions.KeyFile == "" { + c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile) + } + + if c.Host == "" { + defaultHost := os.Getenv("DOCKER_HOST") + if defaultHost == "" { + if runtime.GOOS != "windows" { + // If we do not have a host, default to unix socket + defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) + } else { + // If we do not have a host, default to TCP socket on Windows + defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort) + } + } + defaultHost, err := opts.ValidateHost(defaultHost) + if err != nil { + return nil, err + } + c.Host = defaultHost + } + + if c.TrustKey == "" { + c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile) + } + + if c.TLSVerify { + c.TLS = true + } + + if c.TLS { + c.TLSOptions.InsecureSkipVerify = !c.TLSVerify + } + + var tlsConfig *tls.Config + + if c.TLS { + var err error + tlsConfig, err = tlsconfig.Client(c.TLSOptions) + if err != nil { + return nil, err + } + } + + return dockerclient.NewDockerClient(c.Host, tlsConfig) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/client_factory.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/client_factory.go new file mode 100644 index 00000000..d8eda264 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/client_factory.go @@ -0,0 +1,32 @@ +package docker + +import ( + "github.com/docker/libcompose/project" + "github.com/samalba/dockerclient" +) + +type ClientFactory interface { + // Create constructs a Docker client for the given service. The passed in + // config may be nil in which case a generic client for the project should + // be returned. 
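+	// (In this vendored copy, defaultClientFactory below ignores the service
+	// argument and returns one shared client for every service.)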
+ Create(service project.Service) dockerclient.Client +} + +type defaultClientFactory struct { + client dockerclient.Client +} + +func NewDefaultClientFactory(opts ClientOpts) (ClientFactory, error) { + client, err := CreateClient(opts) + if err != nil { + return nil, err + } + + return &defaultClientFactory{ + client: client, + }, nil +} + +func (s *defaultClientFactory) Create(service project.Service) dockerclient.Client { + return s.client +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/container.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/container.go new file mode 100644 index 00000000..1eeabb06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/container.go @@ -0,0 +1,487 @@ +package docker + +import ( + "bufio" + "fmt" + "math" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/libcompose/logger" + "github.com/docker/libcompose/project" + "github.com/samalba/dockerclient" +) + +type Container struct { + project.EmptyService + + name string + service *Service + client dockerclient.Client +} + +func NewContainer(client dockerclient.Client, name string, service *Service) *Container { + return &Container{ + client: client, + name: name, + service: service, + } +} + +func (c *Container) findExisting() (*dockerclient.Container, error) { + return GetContainerByName(c.client, c.name) +} + +func (c *Container) findInfo() (*dockerclient.ContainerInfo, error) { + container, err := c.findExisting() + if err != nil { + return nil, err + } + + return c.client.InspectContainer(container.Id) +} + +func (c *Container) Info() (project.Info, error) { + container, err := c.findExisting() + if err != nil { + return nil, err + } + + result := project.Info{} + + result = append(result, project.InfoPart{"Name", name(container.Names)}) + result = append(result, project.InfoPart{"Command", container.Command}) + result = append(result, project.InfoPart{"State", container.Status}) + result = append(result, project.InfoPart{"Ports", portString(container.Ports)}) + + return result, nil +} + +func portString(ports []dockerclient.Port) string { + result := []string{} + + for _, port := range ports { + if port.PublicPort > 0 { + result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + } else { + result = append(result, fmt.Sprintf("%d/%s", port.PrivatePort, port.Type)) + } + } + + return strings.Join(result, ", ") +} + +func name(names []string) string { + max := math.MaxInt32 + var current string + + for _, v := range names { + if len(v) < max { + max = len(v) + current = v + } + } + + return current[1:] +} + +func (c *Container) Rebuild(imageName string) (*dockerclient.Container, error) { + info, err := c.findInfo() + if err != nil { + return nil, err + } else if info == nil { + return nil, fmt.Errorf("Can not find container to rebuild for service: %s", c.service.Name()) + } + + hash := info.Config.Labels[HASH.Str()] + if hash == "" { + return nil, fmt.Errorf("Failed to find hash on old container: %s", info.Name) + } + + name := info.Name[1:] + new_name := fmt.Sprintf("%s_%s", name, info.Id[:12]) + logrus.Debugf("Renaming %s => %s", name, new_name) + if err := c.client.RenameContainer(name, new_name); err != nil { + return nil, err + } + + 
newContainer, err := c.createContainer(imageName, info.Id) + if err != nil { + return nil, err + } + logrus.Debugf("Created replacement container %s", newContainer.Id) + + if err := c.client.RemoveContainer(info.Id, true, false); err != nil { + logrus.Errorf("Failed to remove old container %s", c.name) + return nil, err + } + + logrus.Debugf("Removed old container %s %s", c.name, info.Id) + + return newContainer, nil +} + +func (c *Container) Create(imageName string) (*dockerclient.Container, error) { + container, err := c.findExisting() + if err != nil { + return nil, err + } + + if container == nil { + container, err = c.createContainer(imageName, "") + if err != nil { + return nil, err + } + c.service.context.Project.Notify(project.CONTAINER_CREATED, c.service.Name(), map[string]string{ + "name": c.Name(), + }) + } + + return container, err +} + +func (c *Container) Down() error { + return c.withContainer(func(container *dockerclient.Container) error { + return c.client.StopContainer(container.Id, c.service.context.Timeout) + }) +} + +func (c *Container) Kill() error { + return c.withContainer(func(container *dockerclient.Container) error { + return c.client.KillContainer(container.Id, c.service.context.Signal) + }) +} + +func (c *Container) Delete() error { + container, err := c.findExisting() + if err != nil || container == nil { + return err + } + + info, err := c.client.InspectContainer(container.Id) + if err != nil { + return err + } + + if info.State.Running { + err := c.client.StopContainer(container.Id, c.service.context.Timeout) + if err != nil { + return err + } + } + + return c.client.RemoveContainer(container.Id, true, false) +} + +func (c *Container) Up(imageName string) error { + var err error + + defer func() { + if err == nil && c.service.context.Log { + go c.Log() + } + }() + + container, err := c.Create(imageName) + if err != nil { + return err + } + + info, err := c.client.InspectContainer(container.Id) + if err != nil { + return err + } + + if !info.State.Running { + logrus.Debugf("Starting container: %s", container.Id) + // assign to the outer err so the deferred log hook sees start failures + if err = c.client.StartContainer(container.Id, nil); err != nil { + return err + } + + c.service.context.Project.Notify(project.CONTAINER_STARTED, c.service.Name(), map[string]string{ + "name": c.Name(), + }) + } + + return nil +} + +func (c *Container) OutOfSync(imageName string) (bool, error) { + container, err := c.findExisting() + if err != nil || container == nil { + return false, err + } + + info, err := c.client.InspectContainer(container.Id) + if err != nil { + return false, err + } + + return info.Config.Labels[HASH.Str()] != c.getHash(imageName), nil +} + +func (c *Container) getHash(imageName string) string { + serviceConfig := *c.service.Config() + imageInfo, err := c.client.InspectImage(imageName) + if imageInfo != nil && err == nil { + serviceConfig.Image = imageInfo.Id + } else { + serviceConfig.Image = imageName + } + + return project.GetServiceHash(c.service.Name(), serviceConfig) +} + +func (c *Container) createContainer(imageName, oldContainer string) (*dockerclient.Container, error) { + config, err := ConvertToApi(c.service.serviceConfig) + if err != nil { + return nil, err + } + + config.Image = imageName + + if config.Labels == nil { + config.Labels = map[string]string{} + } + + config.Labels[NAME.Str()] = c.name + config.Labels[SERVICE.Str()] = c.service.name + config.Labels[PROJECT.Str()] = c.service.context.Project.Name + config.Labels[HASH.Str()] = c.getHash(imageName) + + err = c.populateAdditionalHostConfig(&config.HostConfig) + if err
!= nil { + return nil, err + } + + if oldContainer != "" { + config.HostConfig.VolumesFrom = append(config.HostConfig.VolumesFrom, oldContainer) + } + + logrus.Debugf("Creating container %s %#v", c.name, config) + + id, err := c.client.CreateContainer(config, c.name) + if err != nil && err.Error() == "Not found" { + // The image is missing locally: pull it, then retry the create. + if err = c.pull(config.Image); err == nil { + id, err = c.client.CreateContainer(config, c.name) + } + } + + if err != nil { + logrus.Debugf("Failed to create container %s: %v", c.name, err) + return nil, err + } + + return GetContainerById(c.client, id) +} + +func (c *Container) populateAdditionalHostConfig(hostConfig *dockerclient.HostConfig) error { + links := map[string]string{} + + for _, link := range c.service.DependentServices() { + if _, ok := c.service.context.Project.Configs[link.Target]; !ok { + continue + } + + service, err := c.service.context.Project.CreateService(link.Target) + if err != nil { + return err + } + + containers, err := service.Containers() + if err != nil { + return err + } + + if link.Type == project.REL_TYPE_LINK { + c.addLinks(links, service, link, containers) + } else if link.Type == project.REL_TYPE_IPC_NAMESPACE { + hostConfig, err = c.addIpc(hostConfig, service, containers) + } else if link.Type == project.REL_TYPE_NET_NAMESPACE { + hostConfig, err = c.addNetNs(hostConfig, service, containers) + } + + if err != nil { + return err + } + } + + hostConfig.Links = []string{} + for k, v := range links { + hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":")) + } + for _, v := range c.service.Config().ExternalLinks { + hostConfig.Links = append(hostConfig.Links, v) + } + + return nil +} + +func (c *Container) addLinks(links map[string]string, service project.Service, rel project.ServiceRelationship, containers []project.Container) { + for _, container := range containers { + if _, ok := links[rel.Alias]; !ok { + links[rel.Alias] = container.Name() + } + + links[container.Name()] = container.Name() + } +} + +func (c *Container) addIpc(config *dockerclient.HostConfig, service project.Service, containers []project.Container) (*dockerclient.HostConfig, error) { + if len(containers) == 0 { + return nil, fmt.Errorf("Failed to find container for IPC %s", c.service.Config().Ipc) + } + + id, err := containers[0].Id() + if err != nil { + return nil, err + } + + config.IpcMode = "container:" + id + return config, nil +} + +func (c *Container) addNetNs(config *dockerclient.HostConfig, service project.Service, containers []project.Container) (*dockerclient.HostConfig, error) { + if len(containers) == 0 { + return nil, fmt.Errorf("Failed to find container for network ns %s", c.service.Config().Net) + } + + id, err := containers[0].Id() + if err != nil { + return nil, err + } + + config.NetworkMode = "container:" + id + return config, nil +} + +func (c *Container) Id() (string, error) { + container, err := c.findExisting() + if container == nil { + return "", err + } else { + return container.Id, err + } +} + +func (c *Container) Name() string { + return c.name +} + +func (c *Container) Pull() error { + return c.pull(c.service.serviceConfig.Image) +} + +func (c *Container) Restart() error { + container, err := c.findExisting() + if err != nil || container == nil { + return err + } + + return c.client.RestartContainer(container.Id, c.service.context.Timeout) +} + +func (c *Container) Log() error { + container, err := c.findExisting() + if container == nil || err != nil { + return err + } + + info, err := c.client.InspectContainer(container.Id) + if info == nil || err != nil { + return err + } + + l :=
c.service.context.LoggerFactory.Create(c.name) + + output, err := c.client.ContainerLogs(container.Id, &dockerclient.LogOptions{ + Follow: true, + Stdout: true, + Stderr: true, + Tail: 10, + }) + if err != nil { + return err + } + + if info.Config.Tty { + scanner := bufio.NewScanner(output) + for scanner.Scan() { + l.Out([]byte(scanner.Text() + "\n")) + } + return scanner.Err() + } else { + _, err := stdcopy.StdCopy(&logger.LoggerWrapper{ + Logger: l, + }, &logger.LoggerWrapper{ + Err: true, + Logger: l, + }, output) + return err + } + + return nil +} + +func (c *Container) pull(image string) error { + taglessRemote, tag := parsers.ParseRepositoryTag(image) + if tag == "" { + image = utils.ImageReference(taglessRemote, tags.DEFAULTTAG) + } + + repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + if err != nil { + return err + } + + authConfig := cliconfig.AuthConfig{} + if c.service.context.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil { + authConfig = registry.ResolveAuthConfig(c.service.context.ConfigFile, repoInfo.Index) + } + + err = c.client.PullImage(image, &dockerclient.AuthConfig{ + Username: authConfig.Username, + Password: authConfig.Password, + Email: authConfig.Email, + }) + + if err != nil { + logrus.Errorf("Failed to pull image %s: %v", image, err) + } + + return err +} + +func (c *Container) withContainer(action func(*dockerclient.Container) error) error { + container, err := c.findExisting() + if err != nil { + return err + } + + if container != nil { + return action(container) + } + + return nil +} + +func (c *Container) Port(port string) (string, error) { + info, err := c.findInfo() + if err != nil { + return "", err + } + + if bindings, ok := info.NetworkSettings.Ports[port]; ok { + result := []string{} + for _, binding := range bindings { + result = append(result, binding.HostIp+":"+binding.HostPort) + } + + return strings.Join(result, "\n"), nil + } else { + return "", nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/context.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/context.go new file mode 100644 index 00000000..c2474b54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/context.go @@ -0,0 +1,33 @@ +package docker + +import ( + "github.com/docker/docker/cliconfig" + "github.com/docker/libcompose/project" +) + +type Context struct { + project.Context + Builder Builder + ClientFactory ClientFactory + ConfigDir string + ConfigFile *cliconfig.ConfigFile +} + +func (c *Context) open() error { + return c.LookupConfig() +} + +func (c *Context) LookupConfig() error { + if c.ConfigFile != nil { + return nil + } + + config, err := cliconfig.Load(c.ConfigDir) + if err != nil { + return err + } + + c.ConfigFile = config + + return nil +} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/docker/convert.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/convert.go similarity index 73% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/docker/convert.go rename to Godeps/_workspace/src/github.com/docker/libcompose/docker/convert.go index 0df379c0..937805ec 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/docker/convert.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/convert.go @@ -3,9 +3,12 @@ package docker import ( "strings" - "github.com/docker/docker/nat" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/nat" 
"github.com/docker/docker/runconfig" - "github.com/rancherio/rancher-compose/librcompose/project" + "github.com/docker/libcompose/project" + "github.com/docker/libcompose/utils" + "github.com/samalba/dockerclient" ) func Filter(vs []string, f func(string) bool) []string { @@ -26,6 +29,26 @@ func isVolume(s string) bool { return !isBind(s) } +func ConvertToApi(c *project.ServiceConfig) (*dockerclient.ContainerConfig, error) { + config, hostConfig, err := Convert(c) + if err != nil { + return nil, err + } + + var result dockerclient.ContainerConfig + err = utils.ConvertByJson(config, &result) + if err != nil { + logrus.Errorf("Failed to convert config to API structure: %v\n%#v", err, config) + return nil, err + } + + err = utils.ConvertByJson(hostConfig, &result.HostConfig) + if err != nil { + logrus.Errorf("Failed to convert hostConfig to API structure: %v\n%#v", err, hostConfig) + } + return &result, err +} + func Convert(c *project.ServiceConfig) (*runconfig.Config, *runconfig.HostConfig, error) { vs := Filter(c.Volumes, isVolume) @@ -74,16 +97,16 @@ func Convert(c *project.ServiceConfig) (*runconfig.Config, *runconfig.HostConfig } host_config := &runconfig.HostConfig{ VolumesFrom: c.VolumesFrom, - CapAdd: c.CapAdd, - CapDrop: c.CapDrop, - CpuShares: c.CpuShares, + CapAdd: runconfig.NewCapList(c.CapAdd), + CapDrop: runconfig.NewCapList(c.CapDrop), + CPUShares: c.CpuShares, CpusetCpus: c.CpuSet, ExtraHosts: c.ExtraHosts, Privileged: c.Privileged, Binds: Filter(c.Volumes, isBind), Devices: deviceMappings, - Dns: c.Dns.Slice(), - DnsSearch: c.DnsSearch.Slice(), + DNS: c.Dns.Slice(), + DNSSearch: c.DnsSearch.Slice(), LogConfig: runconfig.LogConfig{ Type: c.LogDriver, Config: c.LogOpt, diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/docker/convert_test.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/convert_test.go similarity index 92% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/docker/convert_test.go rename to Godeps/_workspace/src/github.com/docker/libcompose/docker/convert_test.go index c4c99d87..cbdcfa35 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/docker/convert_test.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/convert_test.go @@ -2,7 +2,7 @@ package docker import ( shlex "github.com/flynn/go-shlex" - "github.com/rancherio/rancher-compose/librcompose/project" + "github.com/docker/libcompose/project" "github.com/stretchr/testify/assert" "testing" ) diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/functions.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/functions.go new file mode 100644 index 00000000..ccee5077 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/functions.go @@ -0,0 +1,49 @@ +package docker + +import "github.com/samalba/dockerclient" + +func GetContainersByFilter(client dockerclient.Client, filter ...string) ([]dockerclient.Container, error) { + filterResult := "" + + for _, value := range filter { + if filterResult == "" { + filterResult = value + } else { + filterResult = And(filterResult, value) + } + } + + return client.ListContainers(true, false, filterResult) +} + +func GetContainerByName(client dockerclient.Client, name string) (*dockerclient.Container, error) { + containers, err := client.ListContainers(true, false, NAME.Eq(name)) + if err != nil { + return nil, err + } + + if len(containers) == 0 { + return nil, nil + } + + return &containers[0], nil +} 
+ +func GetContainerById(client dockerclient.Client, id string) (*dockerclient.Container, error) { + containers, err := client.ListContainers(true, false, "") + if err != nil { + return nil, err + } + + if len(containers) == 0 { + return nil, nil + } + + for _, c := range containers { + if c.Id == id { + return &c, nil + } + } + + return nil, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/labels.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/labels.go new file mode 100644 index 00000000..49b7a551 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/labels.go @@ -0,0 +1,47 @@ +package docker + +import ( + "encoding/json" + + "github.com/docker/libcompose/utils" +) + +type Label string + +const ( + NAME = Label("io.docker.compose.name") + PROJECT = Label("io.docker.compose.project") + SERVICE = Label("io.docker.compose.service") + HASH = Label("io.docker.compose.config-hash") + REBUILD = Label("io.docker.compose.rebuild") +) + +func (f Label) Eq(value string) string { + return utils.LabelFilter(string(f), value) +} + +func And(left, right string) string { + leftMap := map[string][]string{} + rightMap := map[string][]string{} + + // Ignore errors + json.Unmarshal([]byte(left), &leftMap) + json.Unmarshal([]byte(right), &rightMap) + + for k, v := range rightMap { + existing, ok := leftMap[k] + if ok { + leftMap[k] = append(existing, v...) + } else { + leftMap[k] = v + } + } + + result, _ := json.Marshal(leftMap) + + return string(result) +} + +func (f Label) Str() string { + return string(f) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/name.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/name.go new file mode 100644 index 00000000..a85a0519 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/name.go @@ -0,0 +1,68 @@ +package docker + +import ( + "fmt" + "io" + "time" + + "github.com/samalba/dockerclient" +) + +const format = "%s_%s_%d" + +type Namer interface { + io.Closer + Next() string +} + +type inOrderNamer struct { + names chan string + done chan bool +} + +func OneName(client dockerclient.Client, project, service string) (string, error) { + namer := NewNamer(client, project, service) + defer namer.Close() + + return namer.Next(), nil +} + +func NewNamer(client dockerclient.Client, project, service string) Namer { + namer := &inOrderNamer{ + names: make(chan string), + done: make(chan bool), + } + + go func() { + for i := 1; true; i++ { + name := fmt.Sprintf(format, project, service, i) + c, err := GetContainerByName(client, name) + if err != nil { + // Sleep here to avoid crazy tight loop when things go south + time.Sleep(time.Second * 1) + continue + } + if c != nil { + continue + } + + select { + case namer.names <- name: + case <-namer.done: + close(namer.names) + return + } + } + }() + + return namer +} + +func (i *inOrderNamer) Next() string { + return <-i.names +} + +func (i *inOrderNamer) Close() error { + close(i.done) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/project.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/project.go new file mode 100644 index 00000000..d272e02b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/project.go @@ -0,0 +1,50 @@ +package docker + +import ( + "github.com/Sirupsen/logrus" + + "github.com/docker/libcompose/lookup" + "github.com/docker/libcompose/project" +) + +func NewProject(context *Context) (*project.Project, error) { 
+ if context.ConfigLookup == nil { + context.ConfigLookup = &lookup.FileConfigLookup{} + } + + if context.EnvironmentLookup == nil { + context.EnvironmentLookup = &lookup.OsEnvLookup{} + } + + if context.ServiceFactory == nil { + context.ServiceFactory = &ServiceFactory{ + context: context, + } + } + + if context.Builder == nil { + context.Builder = NewDaemonBuilder(context) + } + + if context.ClientFactory == nil { + if factory, err := NewDefaultClientFactory(ClientOpts{}); err != nil { + return nil, err + } else { + context.ClientFactory = factory + } + } + + p := project.NewProject(&context.Context) + + err := p.Parse() + if err != nil { + return nil, err + } + + if err = context.open(); err != nil { + logrus.Errorf("Failed to open project %s: %v", p.Name, err) + return nil, err + } + + return p, err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/service.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/service.go new file mode 100644 index 00000000..d6cdf8ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/service.go @@ -0,0 +1,300 @@ +package docker + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/project" + "github.com/docker/libcompose/utils" +) + +type Service struct { + name string + serviceConfig *project.ServiceConfig + context *Context + imageName string +} + +func NewService(name string, serviceConfig *project.ServiceConfig, context *Context) *Service { + return &Service{ + name: name, + serviceConfig: serviceConfig, + context: context, + } +} + +func (s *Service) Name() string { + return s.name +} + +func (s *Service) Config() *project.ServiceConfig { + return s.serviceConfig +} + +func (s *Service) DependentServices() []project.ServiceRelationship { + return project.DefaultDependentServices(s.context.Project, s) +} + +func (s *Service) Create() error { + _, err := s.createOne() + return err +} + +func (s *Service) collectContainers() ([]*Container, error) { + client := s.context.ClientFactory.Create(s) + containers, err := GetContainersByFilter(client, SERVICE.Eq(s.name), PROJECT.Eq(s.context.Project.Name)) + if err != nil { + return nil, err + } + + result := []*Container{} + + if len(containers) == 0 { + return result, nil + } + + imageName, err := s.build() + if err != nil { + return nil, err + } + + for _, container := range containers { + name := container.Labels[NAME.Str()] + c := NewContainer(client, name, s) + if outOfSync, err := c.OutOfSync(imageName); err != nil { + return nil, err + } else if outOfSync && s.context.Rebuild && s.Config().Labels.MapParts()[REBUILD.Str()] != "false" { + logrus.Infof("Rebuilding %s", name) + if _, err := c.Rebuild(imageName); err != nil { + return nil, err + } + } else if outOfSync { + logrus.Warnf("%s needs rebuilding", name) + } + + result = append(result, c) + } + + return result, nil +} + +func (s *Service) createOne() (*Container, error) { + containers, err := s.constructContainers(true, 1) + if err != nil { + return nil, err + } + + return containers[0], err +} + +func (s *Service) Build() error { + _, err := s.build() + return err +} + +func (s *Service) build() (string, error) { + if s.imageName != "" { + return s.imageName, nil + } + + if s.context.Builder == nil { + s.imageName = s.Config().Image + } else { + var err error + s.imageName, err = s.context.Builder.Build(s.context.Project, s) + if err != nil { + return "", err + } + } + + return s.imageName, nil +} + +func (s *Service) constructContainers(create bool, count int) 
([]*Container, error) { + result, err := s.collectContainers() + if err != nil { + return nil, err + } + + client := s.context.ClientFactory.Create(s) + + namer := NewNamer(client, s.context.Project.Name, s.name) + defer namer.Close() + + for i := len(result); i < count; i++ { + containerName := namer.Next() + + c := NewContainer(client, containerName, s) + + if create { + imageName, err := s.build() + if err != nil { + return nil, err + } + + dockerContainer, err := c.Create(imageName) + if err != nil { + return nil, err + } else { + logrus.Debugf("Created container %s: %v", dockerContainer.Id, dockerContainer.Names) + } + } + + result = append(result, NewContainer(client, containerName, s)) + } + + return result, nil +} + +func (s *Service) Up() error { + imageName, err := s.build() + if err != nil { + return err + } + + return s.up(imageName, true) +} + +func (s *Service) Info() (project.InfoSet, error) { + result := project.InfoSet{} + containers, err := s.collectContainers() + if err != nil { + return nil, err + } + + for _, c := range containers { + if info, err := c.Info(); err != nil { + return nil, err + } else { + result = append(result, info) + } + } + + return result, nil +} + +func (s *Service) Start() error { + return s.up("", false) +} + +func (s *Service) up(imageName string, create bool) error { + containers, err := s.collectContainers() + if err != nil { + return err + } + + logrus.Debugf("Found %d existing containers for service %s", len(containers), s.name) + + if len(containers) == 0 && create { + c, err := s.createOne() + if err != nil { + return err + } + containers = []*Container{c} + } + + return s.eachContainer(func(c *Container) error { + return c.Up(imageName) + }) +} + +func (s *Service) eachContainer(action func(*Container) error) error { + containers, err := s.collectContainers() + if err != nil { + return err + } + + tasks := utils.InParallel{} + for _, container := range containers { + task := func(container *Container) func() error { + return func() error { + return action(container) + } + }(container) + + tasks.Add(task) + } + + return tasks.Wait() +} + +func (s *Service) Down() error { + return s.eachContainer(func(c *Container) error { + return c.Down() + }) +} + +func (s *Service) Restart() error { + return s.eachContainer(func(c *Container) error { + return c.Restart() + }) +} + +func (s *Service) Kill() error { + return s.eachContainer(func(c *Container) error { + return c.Kill() + }) +} + +func (s *Service) Delete() error { + return s.eachContainer(func(c *Container) error { + return c.Delete() + }) +} + +func (s *Service) Log() error { + return s.eachContainer(func(c *Container) error { + return c.Log() + }) +} + +func (s *Service) Scale(scale int) error { + foundCount := 0 + err := s.eachContainer(func(c *Container) error { + foundCount++ + if foundCount > scale { + err := c.Down() + if err != nil { + return err + } + + return c.Delete() + } + return nil + }) + + if err != nil { + return err + } + + if foundCount != scale { + _, err := s.constructContainers(true, scale) + if err != nil { + return err + } + + } + + return s.up("", false) +} + +func (s *Service) Pull() error { + containers, err := s.constructContainers(false, 1) + if err != nil { + return err + } + + return containers[0].Pull() +} + +func (s *Service) Containers() ([]project.Container, error) { + result := []project.Container{} + containers, err := s.collectContainers() + if err != nil { + return nil, err + } + + for _, c := range containers { + result = append(result, c) + } + + 
return result, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/docker/service_factory.go b/Godeps/_workspace/src/github.com/docker/libcompose/docker/service_factory.go new file mode 100644 index 00000000..70c5a402 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/docker/service_factory.go @@ -0,0 +1,11 @@ +package docker + +import "github.com/docker/libcompose/project" + +type ServiceFactory struct { + context *Context +} + +func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) { + return NewService(name, serviceConfig, s.context), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/logger/null.go b/Godeps/_workspace/src/github.com/docker/libcompose/logger/null.go new file mode 100644 index 00000000..07f7bcf6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/logger/null.go @@ -0,0 +1,14 @@ +package logger + +type NullLogger struct { +} + +func (n *NullLogger) Out(_ []byte) { +} + +func (n *NullLogger) Err(_ []byte) { +} + +func (n *NullLogger) Create(_ string) Logger { + return &NullLogger{} +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/logger/types.go b/Godeps/_workspace/src/github.com/docker/libcompose/logger/types.go new file mode 100644 index 00000000..9a6868a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/logger/types.go @@ -0,0 +1,24 @@ +package logger + +type Factory interface { + Create(name string) Logger +} + +type Logger interface { + Out(bytes []byte) + Err(bytes []byte) +} + +type LoggerWrapper struct { + Err bool + Logger Logger +} + +func (l *LoggerWrapper) Write(bytes []byte) (int, error) { + if l.Err { + l.Logger.Err(bytes) + } else { + l.Logger.Out(bytes) + } + return len(bytes), nil +} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/config_lookup.go b/Godeps/_workspace/src/github.com/docker/libcompose/lookup/file.go similarity index 81% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/config_lookup.go rename to Godeps/_workspace/src/github.com/docker/libcompose/lookup/file.go index ae00b52d..96cce945 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/config_lookup.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/lookup/file.go @@ -1,4 +1,4 @@ -package project +package lookup import ( "io/ioutil" @@ -11,7 +11,7 @@ import ( type FileConfigLookup struct { } -func (f FileConfigLookup) Lookup(file, relativeTo string) ([]byte, string, error) { +func (f *FileConfigLookup) Lookup(file, relativeTo string) ([]byte, string, error) { if strings.HasPrefix(file, "/") { logrus.Debugf("Reading file %s", file) bytes, err := ioutil.ReadFile(file) diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/lookup/simple_env.go b/Godeps/_workspace/src/github.com/docker/libcompose/lookup/simple_env.go new file mode 100644 index 00000000..ab48ea57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/lookup/simple_env.go @@ -0,0 +1,20 @@ +package lookup + +import ( + "fmt" + "os" + + "github.com/docker/libcompose/project" +) + +type OsEnvLookup struct { +} + +func (o *OsEnvLookup) Lookup(key, serviceName string, config *project.ServiceConfig) []string { + ret := os.Getenv(key) + if ret == "" { + return []string{} + } else { + return []string{fmt.Sprintf("%s=%s", key, ret)} + } +} diff --git 
a/Godeps/_workspace/src/github.com/docker/libcompose/project/context.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/context.go new file mode 100644 index 00000000..53d6c643 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/context.go @@ -0,0 +1,132 @@ +package project + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/logger" +) + +var projectRegexp = regexp.MustCompile("[^a-zA-Z0-9_.-]") + +type Context struct { + Timeout int + Log bool + Rebuild bool + Signal string + ComposeFile string + ComposeBytes []byte + ProjectName string + isOpen bool + ServiceFactory ServiceFactory + EnvironmentLookup EnvironmentLookup + ConfigLookup ConfigLookup + LoggerFactory logger.Factory + IgnoreMissingConfig bool + Project *Project +} + +func (c *Context) readComposeFile() error { + if c.ComposeBytes != nil { + return nil + } + + logrus.Debugf("Opening compose file: %s", c.ComposeFile) + + if c.ComposeFile == "-" { + if composeBytes, err := ioutil.ReadAll(os.Stdin); err != nil { + logrus.Errorf("Failed to read compose file from stdin: %v", err) + return err + } else { + c.ComposeBytes = composeBytes + } + } else if c.ComposeFile != "" { + if composeBytes, err := ioutil.ReadFile(c.ComposeFile); os.IsNotExist(err) { + if c.IgnoreMissingConfig { + return nil + } + logrus.Errorf("Failed to find %s", c.ComposeFile) + return err + } else if err != nil { + logrus.Errorf("Failed to open %s", c.ComposeFile) + return err + } else { + c.ComposeBytes = composeBytes + } + } + + return nil +} + +func (c *Context) determineProject() error { + name, err := c.lookupProjectName() + if err != nil { + return err + } + + c.ProjectName = projectRegexp.ReplaceAllString(strings.ToLower(name), "-") + + if c.ProjectName == "" { + return fmt.Errorf("Failed to determine project name") + } + + if strings.ContainsAny(c.ProjectName[0:1], "_.-") { + c.ProjectName = "x" + c.ProjectName + } + + return nil +} + +func (c *Context) lookupProjectName() (string, error) { + if c.ProjectName != "" { + return c.ProjectName, nil + } + + if envProject := os.Getenv("COMPOSE_PROJECT_NAME"); envProject != "" { + return envProject, nil + } + + f, err := filepath.Abs(c.ComposeFile) + if err != nil { + logrus.Errorf("Failed to get absolute directory for: %s", c.ComposeFile) + return "", err + } + + f = toUnixPath(f) + + parent := path.Base(path.Dir(f)) + if parent != "" && parent != "."
{ + return parent, nil + } else if wd, err := os.Getwd(); err != nil { + return "", err + } else { + return path.Base(toUnixPath(wd)), nil + } +} + +func toUnixPath(p string) string { + return strings.Replace(p, "\\", "/", -1) +} + +func (c *Context) open() error { + if c.isOpen { + return nil + } + + if err := c.readComposeFile(); err != nil { + return err + } + + if err := c.determineProject(); err != nil { + return err + } + + c.isOpen = true + return nil +} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/empty.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/empty.go similarity index 53% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/empty.go rename to Godeps/_workspace/src/github.com/docker/libcompose/project/empty.go index ec5c5942..431d9758 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/empty.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/empty.go @@ -7,10 +7,18 @@ func (e *EmptyService) Create() error { return nil } +func (e *EmptyService) Build() error { + return nil +} + func (e *EmptyService) Up() error { return nil } +func (e *EmptyService) Start() error { + return nil +} + func (e *EmptyService) Down() error { return nil } @@ -25,8 +33,24 @@ func (e *EmptyService) Restart() error { func (e *EmptyService) Log() error { return nil - } + +func (e *EmptyService) Pull() error { + return nil +} + +func (e *EmptyService) Kill() error { + return nil +} + +func (e *EmptyService) Containers() ([]Container, error) { + return []Container{}, nil +} + func (e *EmptyService) Scale(count int) error { return nil } + +func (e *EmptyService) Info() (InfoSet, error) { + return InfoSet{}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/project/hash.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/hash.go new file mode 100644 index 00000000..211cc975 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/hash.go @@ -0,0 +1,105 @@ +package project + +import ( + "crypto/sha1" + "encoding/hex" + "fmt" + "io" + "reflect" + "sort" +) + +func GetServiceHash(name string, config ServiceConfig) string { + hash := sha1.New() + + io.WriteString(hash, name) + + //Get values of Service through reflection + val := reflect.ValueOf(&config).Elem() + + //Create slice to sort the keys in Service Config, which allow constant hash ordering + serviceKeys := []string{} + + //Create a data structure of map of values keyed by a string + unsortedKeyValue := make(map[string]interface{}) + + //Get all keys and values in Service Configuration + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + keyField := val.Type().Field(i) + + serviceKeys = append(serviceKeys, keyField.Name) + unsortedKeyValue[keyField.Name] = valueField.Interface() + } + + //Sort serviceKeys alphabetically + sort.Strings(serviceKeys) + + //Go through keys and write hash + for _, serviceKey := range serviceKeys { + serviceValue := unsortedKeyValue[serviceKey] + + io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey)) + + switch s := serviceValue.(type) { + case SliceorMap: + sliceKeys := []string{} + for lkey := range s.MapParts() { + if lkey != "io.rancher.os.hash" { + sliceKeys = append(sliceKeys, lkey) + } + } + sort.Strings(sliceKeys) + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s.MapParts()[sliceKey])) + } + case MaporEqualSlice: + sliceKeys := s.Slice() 
+ // do not sort keys as the order matters + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) + } + case MaporColonSlice: + sliceKeys := s.Slice() + // do not sort keys as the order matters + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) + } + case MaporSpaceSlice: + sliceKeys := s.Slice() + // do not sort keys as the order matters + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) + } + case Command: + sliceKeys := s.Slice() + // do not sort keys as the order matters + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) + } + case Stringorslice: + sliceKeys := s.Slice() + sort.Strings(sliceKeys) + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) + } + case []string: + sliceKeys := s + sort.Strings(sliceKeys) + + for _, sliceKey := range sliceKeys { + io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) + } + default: + io.WriteString(hash, fmt.Sprintf("%v", serviceValue)) + } + } + + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/project/info.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/info.go new file mode 100644 index 00000000..6a5c996e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/info.go @@ -0,0 +1,42 @@ +package project + +import ( + "bytes" + "io" + "text/tabwriter" +) + +func (infos InfoSet) String() string { + //no error checking, none of this should fail + buffer := bytes.NewBuffer(make([]byte, 0, 1024)) + tabwriter := tabwriter.NewWriter(buffer, 4, 4, 2, ' ', 0) + + first := true + for _, info := range infos { + if first { + writeLine(tabwriter, true, info) + } + first = false + writeLine(tabwriter, false, info) + } + + tabwriter.Flush() + return buffer.String() +} + +func writeLine(writer io.Writer, key bool, info Info) { + first := true + for _, part := range info { + if !first { + writer.Write([]byte{'\t'}) + } + first = false + if key { + writer.Write([]byte(part.Key)) + } else { + writer.Write([]byte(part.Value)) + } + } + + writer.Write([]byte{'\n'}) +} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/listener.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/listener.go similarity index 88% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/listener.go rename to Godeps/_workspace/src/github.com/docker/libcompose/project/listener.go index 4c6a66f5..1b98c8b1 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/listener.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/listener.go @@ -33,6 +33,15 @@ type defaultListener struct { upCount int } +func NewDefaultListener(p *Project) chan<- ProjectEvent { + l := defaultListener{ + listenChan: make(chan ProjectEvent), + project: p, + } + go l.start() + return l.listenChan +} + func (d *defaultListener) start() { for event := range d.listenChan { buffer := bytes.NewBuffer(nil) diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/merge.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/merge.go similarity index 88% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/merge.go rename to Godeps/_workspace/src/github.com/docker/libcompose/project/merge.go index 
535fa0dc..a121c921 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/merge.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/merge.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/Sirupsen/logrus" - "github.com/rancherio/rancher-compose/librcompose/util" + "github.com/docker/libcompose/utils" "gopkg.in/yaml.v2" ) @@ -20,9 +20,9 @@ var ( "http:", "https:", } - ignore = map[string]bool{ - "links": true, - "volumes_from": true, + noMerge = []string{ + "links", + "volumes_from", } ) @@ -39,7 +39,7 @@ func Merge(p *Project, bytes []byte) (map[string]*ServiceConfig, error) { } for name, data := range datas { - data, err := parse(p.ConfigLookup, p.File, data, datas) + data, err := parse(p.context.ConfigLookup, p.File, data, datas) if err != nil { logrus.Errorf("Failed to parse service %s: %v", name, err) return nil, err @@ -48,14 +48,14 @@ func Merge(p *Project, bytes []byte) (map[string]*ServiceConfig, error) { datas[name] = data } - err = util.Convert(datas, &configs) + err = utils.Convert(datas, &configs) return configs, err } func readEnvFile(configLookup ConfigLookup, inFile string, serviceData rawService) (rawService, error) { var config ServiceConfig - if err := util.Convert(serviceData, &config); err != nil { + if err := utils.Convert(serviceData, &config); err != nil { return nil, err } @@ -202,13 +202,19 @@ func parse(configLookup ConfigLookup, inFile string, serviceData rawService, dat baseService = clone(baseService) - fmt.Printf("merging\n%#v\n %#v\n", baseService, serviceData) + logrus.Debugf("Merging %#v, %#v", baseService, serviceData) + + for _, k := range noMerge { + if _, ok := baseService[k]; ok { + source := file + if source == "" { + source = inFile + } + return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k) + } + } for k, v := range serviceData { - if ignore[k] { - continue - } - existing, ok := baseService[k] if ok { baseService[k] = merge(existing, v) @@ -217,7 +223,7 @@ func parse(configLookup ConfigLookup, inFile string, serviceData rawService, dat } } - fmt.Printf("result\n%#v\n", baseService) + logrus.Debugf("Merged result %#v", baseService) return baseService, nil } diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/project/project.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/project.go new file mode 100644 index 00000000..91f0d384 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/project.go @@ -0,0 +1,371 @@ +package project + +import ( + "errors" + "fmt" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libcompose/logger" + "github.com/docker/libcompose/utils" +) + +type ServiceState string + +var ( + EXECUTED ServiceState = ServiceState("executed") + UNKNOWN ServiceState = ServiceState("unknown") + ErrRestart error = errors.New("Restart execution") + ErrUnsupported error = errors.New("UnsupportedOperation") +) + +type ProjectEvent struct { + Event Event + ServiceName string + Data map[string]string +} + +type wrapperAction func(*serviceWrapper, map[string]*serviceWrapper) +type serviceAction func(service Service) error + +func NewProject(context *Context) *Project { + p := &Project{ + context: context, + Configs: make(map[string]*ServiceConfig), + } + + if context.LoggerFactory == nil { + context.LoggerFactory = &logger.NullLogger{} + } + + context.Project = p + + p.listeners = []chan<- ProjectEvent{NewDefaultListener(p)} + + return p +} + +func (p *Project) 
Parse() error { + err := p.context.open() + if err != nil { + return err + } + + p.Name = p.context.ProjectName + + if p.context.ComposeFile == "-" { + p.File = "." + } else { + p.File = p.context.ComposeFile + } + + if p.context.ComposeBytes != nil { + return p.Load(p.context.ComposeBytes) + } + + return nil +} + +func (p *Project) CreateService(name string) (Service, error) { + existing, ok := p.Configs[name] + if !ok { + return nil, fmt.Errorf("Failed to find service: %s", name) + } + + // Copy because we are about to modify the environment + config := *existing + + if p.context.EnvironmentLookup != nil { + parsedEnv := make([]string, 0, len(config.Environment.Slice())) + + for _, env := range config.Environment.Slice() { + if strings.IndexRune(env, '=') != -1 { + parsedEnv = append(parsedEnv, env) + continue + } + + for _, value := range p.context.EnvironmentLookup.Lookup(env, name, &config) { + parsedEnv = append(parsedEnv, value) + } + } + + config.Environment = NewMaporEqualSlice(parsedEnv) + } + + return p.context.ServiceFactory.Create(p, name, &config) +} + +func (p *Project) AddConfig(name string, config *ServiceConfig) error { + p.Notify(SERVICE_ADD, name, nil) + + p.Configs[name] = config + p.reload = append(p.reload, name) + + return nil +} + +func (p *Project) Load(bytes []byte) error { + configs := make(map[string]*ServiceConfig) + configs, err := Merge(p, bytes) + if err != nil { + log.Fatalf("Could not parse config for project %s : %v", p.Name, err) + } + + for name, config := range configs { + err := p.AddConfig(name, config) + if err != nil { + return err + } + } + + return nil +} + +func (p *Project) loadWrappers(wrappers map[string]*serviceWrapper) error { + for _, name := range p.reload { + wrapper, err := newServiceWrapper(name, p) + if err != nil { + return err + } + wrappers[name] = wrapper + } + + p.reload = []string{} + + return nil +} + +func (p *Project) Build(services ...string) error { + return p.perform(PROJECT_BUILD_START, PROJECT_BUILD_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, SERVICE_BUILD_START, SERVICE_BUILD, func(service Service) error { + return service.Build() + }) + }), nil) +} + +func (p *Project) Create(services ...string) error { + return p.perform(PROJECT_CREATE_START, PROJECT_CREATE_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, SERVICE_CREATE_START, SERVICE_CREATE, func(service Service) error { + return service.Create() + }) + }), nil) +} + +func (p *Project) Down(services ...string) error { + return p.perform(PROJECT_DOWN_START, PROJECT_DOWN_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, SERVICE_DOWN_START, SERVICE_DOWN, func(service Service) error { + return service.Down() + }) + }), nil) +} + +func (p *Project) Restart(services ...string) error { + return p.perform(PROJECT_RESTART_START, PROJECT_RESTART_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, SERVICE_RESTART_START, SERVICE_RESTART, func(service Service) error { + return service.Restart() + }) + }), nil) +} + +func (p *Project) Start(services ...string) error { + return p.perform(PROJECT_START_START, PROJECT_START_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, SERVICE_START_START, SERVICE_START, func(service Service) 
error { + return service.Start() + }) + }), nil) +} + +func (p *Project) Up(services ...string) error { + return p.perform(PROJECT_UP_START, PROJECT_UP_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(wrappers, SERVICE_UP_START, SERVICE_UP, func(service Service) error { + return service.Up() + }) + }), func(service Service) error { + return service.Create() + }) +} + +func (p *Project) Log(services ...string) error { + return p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, "", "", func(service Service) error { + return service.Log() + }) + }), nil) +} + +func (p *Project) Pull(services ...string) error { + return p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, SERVICE_PULL_START, SERVICE_PULL, func(service Service) error { + return service.Pull() + }) + }), nil) +} + +func (p *Project) Delete(services ...string) error { + return p.perform(PROJECT_DELETE_START, PROJECT_DELETE_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, SERVICE_DELETE_START, SERVICE_DELETE, func(service Service) error { + return service.Delete() + }) + }), nil) +} + +func (p *Project) Kill(services ...string) error { + return p.perform(PROJECT_KILL_START, PROJECT_KILL_DONE, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { + wrapper.Do(nil, SERVICE_KILL_START, SERVICE_KILL, func(service Service) error { + return service.Kill() + }) + }), nil) +} + +func (p *Project) perform(start, done Event, services []string, action wrapperAction, cycleAction serviceAction) error { + if start != "" { + p.Notify(start, "", nil) + } + + err := p.forEach(services, action, cycleAction) + + if err == nil && done != "" { + p.Notify(done, "", nil) + } + + return err +} + +func isSelected(wrapper *serviceWrapper, selected map[string]bool) bool { + return len(selected) == 0 || selected[wrapper.name] +} + +func (p *Project) forEach(services []string, action wrapperAction, cycleAction serviceAction) error { + selected := make(map[string]bool) + wrappers := make(map[string]*serviceWrapper) + + for _, s := range services { + selected[s] = true + } + + return p.traverse(selected, wrappers, action, cycleAction) +} + +func (p *Project) startService(wrappers map[string]*serviceWrapper, history []string, selected, launched map[string]bool, wrapper *serviceWrapper, action wrapperAction, cycleAction serviceAction) error { + if launched[wrapper.name] { + return nil + } + + launched[wrapper.name] = true + history = append(history, wrapper.name) + + for _, dep := range wrapper.service.DependentServices() { + target := wrappers[dep.Target] + if target == nil { + log.Errorf("Failed to find %s", dep.Target) + continue + } + + if utils.Contains(history, dep.Target) { + cycle := strings.Join(append(history, dep.Target), "->") + if dep.Optional { + log.Debugf("Ignoring cycle for %s", cycle) + wrapper.IgnoreDep(dep.Target) + if cycleAction != nil { + var err error + log.Debugf("Running cycle action for %s", cycle) + err = cycleAction(target.service) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("Cycle detected in path %s", cycle) + } + + continue + } + + err := p.startService(wrappers, history, selected, launched, target, action, cycleAction) + if err != nil { + return err + } + } + + if isSelected(wrapper, selected) { + 
log.Debugf("Launching action for %s", wrapper.name) + go action(wrapper, wrappers) + } else { + wrapper.Ignore() + } + + return nil +} + +func (p *Project) traverse(selected map[string]bool, wrappers map[string]*serviceWrapper, action wrapperAction, cycleAction serviceAction) error { + restart := false + + for _, wrapper := range wrappers { + if err := wrapper.Reset(); err != nil { + return err + } + } + + p.loadWrappers(wrappers) + + launched := map[string]bool{} + + for _, wrapper := range wrappers { + p.startService(wrappers, []string{}, selected, launched, wrapper, action, cycleAction) + } + + var firstError error + + for _, wrapper := range wrappers { + if !isSelected(wrapper, selected) { + continue + } + if err := wrapper.Wait(); err == ErrRestart { + restart = true + } else if err != nil { + log.Errorf("Failed to start: %s : %v", wrapper.name, err) + if firstError == nil { + firstError = err + } + } + } + + if restart { + if p.ReloadCallback != nil { + if err := p.ReloadCallback(); err != nil { + log.Errorf("Failed calling callback: %v", err) + } + } + return p.traverse(selected, wrappers, action, cycleAction) + } else { + return firstError + } +} + +func (p *Project) AddListener(c chan<- ProjectEvent) { + if !p.hasListeners { + for _, l := range p.listeners { + close(l) + } + p.hasListeners = true + p.listeners = []chan<- ProjectEvent{c} + } else { + p.listeners = append(p.listeners, c) + } +} + +func (p *Project) Notify(event Event, serviceName string, data map[string]string) { + projectEvent := ProjectEvent{ + Event: event, + ServiceName: serviceName, + Data: data, + } + + for _, l := range p.listeners { + // Don't ever block + select { + case l <- projectEvent: + default: + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/project/service-wrapper.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/service-wrapper.go new file mode 100644 index 00000000..99dda7f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/service-wrapper.go @@ -0,0 +1,116 @@ +package project + +import ( + "sync" + + log "github.com/Sirupsen/logrus" +) + +type serviceWrapper struct { + name string + service Service + done sync.WaitGroup + state ServiceState + err error + project *Project + noWait bool + ignored map[string]bool +} + +func newServiceWrapper(name string, p *Project) (*serviceWrapper, error) { + wrapper := &serviceWrapper{ + name: name, + state: UNKNOWN, + project: p, + ignored: map[string]bool{}, + } + + return wrapper, wrapper.Reset() +} + +func (s *serviceWrapper) IgnoreDep(name string) { + s.ignored[name] = true +} + +func (s *serviceWrapper) Reset() error { + if s.state != EXECUTED { + service, err := s.project.CreateService(s.name) + if err != nil { + log.Errorf("Failed to create service for %s : %v", s.name, err) + return err + } + + s.service = service + } + + if s.err == ErrRestart { + s.err = nil + } + s.done.Add(1) + + return nil +} + +func (s *serviceWrapper) Ignore() { + defer s.done.Done() + + s.state = EXECUTED + s.project.Notify(SERVICE_UP_IGNORED, s.service.Name(), nil) +} + +func (s *serviceWrapper) waitForDeps(wrappers map[string]*serviceWrapper) bool { + if s.noWait { + return true + } + + for _, dep := range s.service.DependentServices() { + if s.ignored[dep.Target] { + continue + } + + if wrapper, ok := wrappers[dep.Target]; ok { + if wrapper.Wait() == ErrRestart { + s.project.Notify(PROJECT_RELOAD, wrapper.service.Name(), nil) + s.err = ErrRestart + return false + } + } else { + log.Errorf("Failed to find %s", 
dep.Target) + } + } + + return true +} + +func (s *serviceWrapper) Do(wrappers map[string]*serviceWrapper, start, done Event, action func(service Service) error) { + defer s.done.Done() + + if s.state == EXECUTED { + return + } + + if wrappers != nil && !s.waitForDeps(wrappers) { + return + } + + s.state = EXECUTED + + if start != "" { + s.project.Notify(start, s.service.Name(), nil) + } + + s.err = action(s.service) + if s.err == ErrRestart { + s.project.Notify(done, s.service.Name(), nil) + s.project.Notify(PROJECT_RELOAD_TRIGGER, s.service.Name(), nil) + } else if s.err != nil { + log.Errorf("Failed %s %s : %v", start, s.name, s.err) + } else if done != "" { + s.project.Notify(done, s.service.Name(), nil) + } +} + +func (s *serviceWrapper) Wait() error { + s.done.Wait() + return s.err +} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/types.go similarity index 70% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types.go rename to Godeps/_workspace/src/github.com/docker/libcompose/project/types.go index 54ad9cd0..9e61a9c2 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types.go +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/types.go @@ -1,15 +1,12 @@ package project -import "github.com/rancherio/go-rancher/client" - type Event string const ( CONTAINER_ID = "container_id" - CONTAINER_STARTING = Event("Starting container") - CONTAINER_CREATED = Event("Created container") - CONTAINER_STARTED = Event("Started container") + CONTAINER_CREATED = Event("Created container") + CONTAINER_STARTED = Event("Started container") SERVICE_ADD = Event("Adding") SERVICE_UP_START = Event("Starting") @@ -23,6 +20,14 @@ const ( SERVICE_DOWN = Event("Stopped") SERVICE_RESTART_START = Event("Restarting") SERVICE_RESTART = Event("Restarted") + SERVICE_PULL_START = Event("Pulling") + SERVICE_PULL = Event("Pulled") + SERVICE_KILL_START = Event("Killing") + SERVICE_KILL = Event("Killed") + SERVICE_START_START = Event("Starting") + SERVICE_START = Event("Started") + SERVICE_BUILD_START = Event("Building") + SERVICE_BUILD = Event("Built") PROJECT_DOWN_START = Event("Stopping project") PROJECT_DOWN_DONE = Event("Project stopped") @@ -36,8 +41,21 @@ const ( PROJECT_RESTART_DONE = Event("Project restarted") PROJECT_RELOAD = Event("Reloading project") PROJECT_RELOAD_TRIGGER = Event("Triggering project reload") + PROJECT_KILL_START = Event("Killing project") + PROJECT_KILL_DONE = Event("Project killed") + PROJECT_START_START = Event("Starting project") + PROJECT_START_DONE = Event("Project started") + PROJECT_BUILD_START = Event("Building project") + PROJECT_BUILD_DONE = Event("Project built") ) +type InfoPart struct { + Key, Value string +} + +type InfoSet []Info +type Info []InfoPart + type ServiceConfig struct { Build string `yaml:"build,omitempty"` CapAdd []string `yaml:"cap_add,omitempty"` @@ -93,37 +111,64 @@ type ConfigLookup interface { } type Project struct { - EnvironmentLookup EnvironmentLookup - ConfigLookup ConfigLookup - Name string - Configs map[string]*ServiceConfig - reload []string - File string - client *client.RancherClient - factory ServiceFactory - ReloadCallback func() error - upCount int - listeners []chan<- ProjectEvent - hasListeners bool -} - -type Container struct { - Name string - State string + Name string + Configs map[string]*ServiceConfig + File string + ReloadCallback func() 
error + context *Context + reload []string + upCount int + listeners []chan<- ProjectEvent + hasListeners bool } type Service interface { + Info() (InfoSet, error) Name() string + Build() error Create() error Up() error + Start() error Down() error Delete() error Restart() error Log() error + Pull() error + Kill() error Config() *ServiceConfig + DependentServices() []ServiceRelationship + Containers() ([]Container, error) Scale(count int) error } +type Container interface { + Id() (string, error) + Name() string + Port(port string) (string, error) +} + type ServiceFactory interface { Create(project *Project, name string, serviceConfig *ServiceConfig) (Service, error) } + +type ServiceRelationshipType string + +const REL_TYPE_LINK = ServiceRelationshipType("") +const REL_TYPE_NET_NAMESPACE = ServiceRelationshipType("netns") +const REL_TYPE_IPC_NAMESPACE = ServiceRelationshipType("ipc") +const REL_TYPE_VOLUMES_FROM = ServiceRelationshipType("volumesFrom") + +type ServiceRelationship struct { + Target, Alias string + Type ServiceRelationshipType + Optional bool +} + +func NewServiceRelationship(nameAlias string, relType ServiceRelationshipType) ServiceRelationship { + name, alias := NameAlias(nameAlias) + return ServiceRelationship{ + Target: name, + Alias: alias, + Type: relType, + } +} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types_yaml.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/types_yaml.go similarity index 100% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types_yaml.go rename to Godeps/_workspace/src/github.com/docker/libcompose/project/types_yaml.go diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types_yaml_test.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/types_yaml_test.go similarity index 100% rename from Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/types_yaml_test.go rename to Godeps/_workspace/src/github.com/docker/libcompose/project/types_yaml_test.go diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/project/utils.go b/Godeps/_workspace/src/github.com/docker/libcompose/project/utils.go new file mode 100644 index 00000000..e8f14986 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/project/utils.go @@ -0,0 +1,63 @@ +package project + +import ( + "strings" + + "github.com/docker/docker/runconfig" +) + +func DefaultDependentServices(p *Project, s Service) []ServiceRelationship { + config := s.Config() + if config == nil { + return []ServiceRelationship{} + } + + result := []ServiceRelationship{} + for _, link := range config.Links.Slice() { + result = append(result, NewServiceRelationship(link, REL_TYPE_LINK)) + } + + for _, volumesFrom := range config.VolumesFrom { + result = append(result, NewServiceRelationship(volumesFrom, REL_TYPE_VOLUMES_FROM)) + } + + result = appendNs(p, result, s.Config().Net, REL_TYPE_NET_NAMESPACE) + result = appendNs(p, result, s.Config().Ipc, REL_TYPE_IPC_NAMESPACE) + + return result +} + +func appendNs(p *Project, rels []ServiceRelationship, conf string, relType ServiceRelationshipType) []ServiceRelationship { + service := GetContainerFromIpcLikeConfig(p, conf) + if service != "" { + rels = append(rels, NewServiceRelationship(service, relType)) + } + return rels +} + +func NameAlias(name string) (string, string) { + parts := strings.SplitN(name, ":", 2) + if len(parts) == 2 { + return parts[0], parts[1] + } 
else { + return parts[0], parts[0] + } +} + +func GetContainerFromIpcLikeConfig(p *Project, conf string) string { + ipc := runconfig.IpcMode(conf) + if !ipc.IsContainer() { + return "" + } + + name := ipc.Container() + if name == "" { + return "" + } + + if _, ok := p.Configs[name]; ok { + return name + } else { + return "" + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcompose/utils/util.go b/Godeps/_workspace/src/github.com/docker/libcompose/utils/util.go new file mode 100644 index 00000000..11cfce6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcompose/utils/util.go @@ -0,0 +1,95 @@ +package utils + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + + "gopkg.in/yaml.v2" +) + +type InParallel struct { + wg sync.WaitGroup + pool sync.Pool +} + +func (i *InParallel) Add(task func() error) { + i.wg.Add(1) + + go func() { + defer i.wg.Done() + err := task() + if err != nil { + i.pool.Put(err) + } + }() +} + +func (i *InParallel) Wait() error { + i.wg.Wait() + obj := i.pool.Get() + if err, ok := obj.(error); ok { + return err + } else { + return nil + } +} + +func ConvertByJson(src, target interface{}) error { + newBytes, err := json.Marshal(src) + if err != nil { + return err + } + + err = json.Unmarshal(newBytes, target) + if err != nil { + logrus.Errorf("Failed to unmarshal: %v\n%s", err, string(newBytes)) + } + return err +} + +func Convert(src, target interface{}) error { + newBytes, err := yaml.Marshal(src) + if err != nil { + return err + } + + err = yaml.Unmarshal(newBytes, target) + if err != nil { + logrus.Errorf("Failed to unmarshal: %v\n%s", err, string(newBytes)) + } + return err +} + +func ConvertToInterfaceMap(input map[string]string) map[string]interface{} { + result := map[string]interface{}{} + for k, v := range input { + result[k] = v + } + + return result +} + +func FilterString(data map[string][]string) string { + // I can't imagine this would ever fail + bytes, _ := json.Marshal(data) + return string(bytes) +} + +func LabelFilter(key, value string) string { + return FilterString(map[string][]string{ + "label": {fmt.Sprintf("%s=%s", key, value)}, + }) +} + +func Contains(collection []string, key string) bool { + for _, value := range collection { + if value == key { + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/README.md b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/README.md new file mode 100644 index 00000000..cdda554b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/README.md @@ -0,0 +1 @@ +Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf diff --git a/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go new file mode 100644 index 00000000..d581a191 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go @@ -0,0 +1,17 @@ +package dns + +import ( + "regexp" +) + +// IPLocalhost is a regex pattern for the localhost IP address range. +const IPLocalhost = `((127\.([0-9]{1,3}.){2}[0-9]{1,3})|(::1))` + +var localhostIPRegexp = regexp.MustCompile(IPLocalhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression.
+// Used for determining if nameserver settings are being passed which are +// localhost addresses +func IsLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} diff --git a/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf.go b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf.go new file mode 100644 index 00000000..ebe3b71a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf.go @@ -0,0 +1,187 @@ +// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/libnetwork/resolvconf/dns" +) + +var ( + // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS + defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} + defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} + ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` + ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock + // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also + // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants + // -- e.g. other link-local types -- either won't work in containers or are unnecessary. + // For readability and sufficiency for Docker purposes this seemed more reasonable than a + // 1000+ character regexp with exact and complete IPv6 validation + ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})` + + localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) + nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) +) + +var lastModified struct { + sync.Mutex + sha256 string + contents []byte +} + +// Get returns the contents of /etc/resolv.conf +func Get() ([]byte, error) { + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, err + } + return resolv, nil +} + +// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash +// and, if modified since last check, returns the bytes and new hash. +// This feature is used by the resolv.conf updater for containers +func GetIfChanged() ([]byte, string, error) { + lastModified.Lock() + defer lastModified.Unlock() + + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, "", err + } + newHash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, "", err + } + if lastModified.sha256 != newHash { + lastModified.sha256 = newHash + lastModified.contents = resolv + return resolv, newHash, nil + } + // nothing changed, so return no data + return nil, "", nil +} + +// GetLastModified retrieves the last used contents and hash of the host resolv.conf. +// Used by containers updating on restart +func GetLastModified() ([]byte, string) { + lastModified.Lock() + defer lastModified.Unlock() + + return lastModified.contents, lastModified.sha256 +} + +// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: +// 1. 
It looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2. Given the caller provides the enable/disable state of IPv6, the filter +// code will remove all IPv6 nameservers if it is not enabled for containers +// +// It returns a boolean to notify the caller if changes were made at all +func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) ([]byte, bool) { + changed := false + cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) + // if IPv6 is not enabled, also clean out any IPv6 address nameserver + if !ipv6Enabled { + cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) + } + // if the resulting resolvConf has no more nameservers defined, add appropriate + // default DNS servers for IPv4 and (optionally) IPv6 + if len(GetNameservers(cleanedResolvConf)) == 0 { + logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers : %v", defaultIPv4Dns) + dns := defaultIPv4Dns + if ipv6Enabled { + logrus.Infof("IPv6 enabled; Adding default IPv6 external servers : %v", defaultIPv6Dns) + dns = append(dns, defaultIPv6Dns...) + } + cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) + } + if !bytes.Equal(resolvConf, cleanedResolvConf) { + changed = true + } + return cleanedResolvConf, changed +} + +// getLines parses input into lines and strips away comments. +func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + var ns = nsRegexp.FindSubmatch(line) + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf) { + nameservers = append(nameservers, nameserver+"/32") + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one is returned. +func GetSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +// Build writes a configuration file to path containing a "nameserver" entry +// for every element in dns, and a "search" entry for every element in +// dnsSearch. 
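+//
+// A minimal illustrative call (the path and values here are hypothetical):
+//
+//	err := Build("/etc/resolv.conf", []string{"8.8.8.8", "8.8.4.4"}, []string{"example.com"})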
+func Build(path string, dns, dnsSearch []string) error { + content := bytes.NewBuffer(nil) + for _, dns := range dns { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return err + } + } + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." { + if _, err := content.WriteString("search " + searchString + "\n"); err != nil { + return err + } + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} diff --git a/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go new file mode 100644 index 00000000..a21c7afb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go @@ -0,0 +1,240 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "os" + "testing" + + _ "github.com/docker/libnetwork/netutils" +) + +func TestGet(t *testing.T) { + resolvConfUtils, err := Get() + if err != nil { + t.Fatal(err) + } + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + t.Fatal(err) + } + if string(resolvConfUtils) != string(resolvConfSystem) { + t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") + } +} + +func TestGetNameservers(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4", "40.3.200.10"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, + } { + test := GetNameservers([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func TestGetNameserversAsCIDR(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, + } { + test := GetNameserversAsCIDR([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. 
Input: %s", test, result, resolv) + } + } +} + +func TestGetSearchDomains(t *testing.T) { + for resolv, result := range map[string][]string{ + `search example.com`: {"example.com"}, + `search example.com # ignored`: {"example.com"}, + ` search example.com `: {"example.com"}, + ` search example.com # ignored`: {"example.com"}, + `search foo.example.com example.com`: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, + ``: {}, + `# ignored`: {}, + `nameserver 1.2.3.4 +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search dup1.example.com dup2.example.com +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search foo.example.com example.com +nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, + } { + test := GetSearchDomains([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func strSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + + return true +} + +func TestBuild(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildWithZeroLengthDomainSearch(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) { + t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content) + } +} + +func TestFilterResolvDns(t *testing.T) { + ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n" + + if result, _ := FilterResolvDNS([]byte(ns0), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n" + if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n" + if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n" + if result, _ := FilterResolvDNS([]byte(ns1), false); result 
!= nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver ::1\nnameserver 10.16.60.14\nnameserver 127.0.2.1\nnameserver 10.16.60.21\n" + if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 10.16.60.14\nnameserver ::1\nnameserver 10.16.60.21\nnameserver ::1" + if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + // with IPv6 disabled (false param), the IPv6 nameserver should be removed + ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1" + if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost+IPv6 off: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + // with IPv6 enabled, the IPv6 nameserver should be preserved + ns0 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\n" + ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1" + if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost+IPv6 on: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + // with IPv6 enabled, and no non-localhost servers, Google defaults (both IPv4+IPv6) should be added + ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\nnameserver 2001:4860:4860::8888\nnameserver 2001:4860:4860::8844" + ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1" + if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + // with IPv6 disabled, and no non-localhost servers, Google defaults (only IPv4) should be added + ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" + ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1" + if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed no Localhost+IPv6 disabled: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000..05be0f8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking!
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS new file mode 100644 index 00000000..9768175f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/README.md new file mode 100644 index 00000000..8e7db381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/README.md @@ -0,0 +1,18 @@ +# libtrust + +Libtrust is a library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control are managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons. + diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go new file mode 100644 index 00000000..3dcca33c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTemplateInfo object.
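+	// (Backdating NotBefore by a week tolerates modest clock skew between
+	// the host generating the certificate and the hosts verifying it.)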
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
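+//
+// For example (the path is hypothetical):
+//
+//	certs, err := LoadCertificateBundle("/etc/docker/ca-bundle.pem")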
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go new file mode 100644 index 00000000..c111f353 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go @@ -0,0 +1,111 @@ +package libtrust + +import ( + "encoding/pem" + "io/ioutil" + "net" + "os" + "path" + "testing" +) + +func TestGenerateCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedClientCert(key) + if err != nil { + t.Fatal(err) + } +} + +func TestGenerateCACertPool(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) + if err != nil { + t.Fatal(err) + } +} + +func TestLoadCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cert1, err := GenerateCACert(caKey1, key) + if err != nil { + t.Fatal(err) + } + cert2, err := GenerateCACert(caKey2, key) + if err != nil { + t.Fatal(err) + } + + d, err := ioutil.TempDir("/tmp", "cert-test") + if err != nil { + t.Fatal(err) + } + caFile := path.Join(d, "ca.pem") + f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatal(err) + } + + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) + if err != nil { + t.Fatal(err) + } + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) + if err != nil { + t.Fatal(err) + } + f.Close() + + certs, err := LoadCertificateBundle(caFile) + if err != nil { + t.Fatal(err) + } + if len(certs) != 2 { + t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) + } + + pool, err := LoadCertificatePool(caFile) + if err != nil { + t.Fatal(err) + } + + if len(pool.Subjects()) != 2 { + t.Fatalf("Invalid certificate pool") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go 
b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control are managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go new file mode 100644 index 00000000..00bbe4b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded.
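+	// (For P-256 that is 64 octets: r and s are 32 octets each.)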
+ sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. 
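+	// (In the JWK format these are base64url-encoded big-endian integers,
+	// zero-padded to the curve's octet length, e.g. 32 bytes for P-256.)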
+ xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature; +// otherwise the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + // The given hashId is only a suggestion, and since EC keys only support + // one signature/hash algorithm given the curve name, we disregard it for + // the elliptic curve JWK signature implementation. + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + rBytes, sBytes := r.Bytes(), s.Bytes() + octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output + rBuf := make([]byte, octetLength-len(rBytes), octetLength) + sBuf := make([]byte, octetLength-len(sBytes), octetLength) + + rBuf = append(rBuf, rBytes...) + sBuf = append(sBuf, sBytes...) + + signature = append(rBuf, sBuf...) + alg = k.signatureAlgorithm.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PrivateKey for use with other standard library operations. The type +// is either *rsa.PrivateKey or *ecdsa.PrivateKey +func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *ecPrivateKey) toMap() map[string]interface{} { + jwk := k.ecPublicKey.toMap() + + dBytes := k.D.Bytes() + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := k.ecPublicKey.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + // Create a buffer with the necessary zero-padding. + dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded SEC 1 format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. + publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. +func GenerateECP256PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-256 key: %s", err) + } + + k.curveName = "P-256" + k.signatureAlgorithm = es256 + + return k, nil +} + +// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
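+//
+// Usage mirrors the P-256 variant above (illustrative):
+//
+//	key, err := GenerateECP384PrivateKey()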
+func GenerateECP384PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P384()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-384 key: %s", err) + } + + k.curveName = "P-384" + k.signatureAlgorithm = es384 + + return k, nil +} + +// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521. +func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go new file mode 100644 index 00000000..26ac3814 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go @@ -0,0 +1,157 @@ +package libtrust + +import ( + "bytes" + "encoding/json" + "testing" +) + +func generateECTestKeys(t *testing.T) []PrivateKey { + p256Key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + p384Key, err := GenerateECP384PrivateKey() + if err != nil { + t.Fatal(err) + } + + p521Key, err := GenerateECP521PrivateKey() + if err != nil { + t.Fatal(err) + } + + return []PrivateKey{p256Key, p384Key, p521Key} +} + +func TestECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + + for _, ecKey := range ecKeys { + if ecKey.KeyType() != "EC" { + t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType()) + } + } +} + +func TestECSignVerify(t *testing.T) { + ecKeys := generateECTestKeys(t) + + message := "Hello, World!" + data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = ecKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys.
+ data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + } +} + +func TestFromCryptoECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + + for _, ecKey := range ecKeys { + cryptoPrivateKey := ecKey.CryptoPrivateKey() + cryptoPublicKey := ecKey.CryptoPublicKey() + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + t.Fatal(err) + } + + if pubKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + + privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) + if err != nil { + t.Fatal(err) + } + + if privKey.KeyID() != ecKey.KeyID() { + t.Fatal("private key key ID mismatch") + } + } +} + +func TestExtendedFields(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + key.AddExtendedField("test", "foobar") + val := key.GetExtendedField("test") + + gotVal, ok := val.(string) + if !ok { + t.Fatalf("value is not a string") + } else if gotVal != val { + t.Fatalf("value %q is not equal to %q", gotVal, val) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go new file mode 100644 index 00000000..5b2b4fca --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. +func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any host pattern matches; stop at the first match so a key is added at most once + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + break + } + } + } + + return filtered, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go new file mode 100644 index 00000000..997e554c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go @@ -0,0 +1,81 @@ +package libtrust + +import ( + "testing" +) + +func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { + if len(sliceA) != len(sliceB) { + t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) + } + + for i, itemA := range sliceA { + itemB := sliceB[i] + if itemA != itemB { + t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) + } + } +} + +func TestFilter(t *testing.T) { + keys := make([]PublicKey, 0, 8) + + // Create 8 keys and add host entries.
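+	// (Key 0 gets no "hosts" metadata, even keys get "*.even.example.com",
+	// key 7 matches any host, and the rest get "*.example.com", so the
+	// filter cases below exercise every branch of FilterByHosts.)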
+ for i := 0; i < cap(keys); i++ { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + // we use both []interface{} and []string here because jwt uses + // []interface{} format, while PEM uses []string + switch { + case i == 0: + // Don't add entries for this key, key 0. + break + case i%2 == 0: + // Should catch keys 2, 4, and 6. + key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) + case i == 7: + // Should catch only the last key, and make it match any hostname. + key.AddExtendedField("hosts", []string{"*"}) + default: + // Should catch keys 1, 3, 5. + key.AddExtendedField("hosts", []string{"*.example.com"}) + } + + keys = append(keys, key) + } + + // Should match 2 keys, the empty one, and the one that matches all hosts. + matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) + if err != nil { + t.Fatal(err) + } + expectedMatch := []PublicKey{keys[0], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match 1 key, the one that matches any host. + matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match keys that end in "example.com", and the key that matches anything. + matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match all of the keys except the empty key. + matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = keys[1:] + compareKeySlices(t, expectedMatch, matchedKeys) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go new file mode 100644 index 00000000..a2df787d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Register SHA224 and SHA256 + _ "crypto/sha512" // Register SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go new file mode 100644 index
00000000..cb2ca9a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. + ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
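+//
+// A sketch of typical use (the names are illustrative):
+//
+//	js, _ := NewJSONSignature(payload)
+//	err := js.SignWithChain(leafKey, []*x509.Certificate{leafCert, intermediateCert})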
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
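ParseJWS is the inverse of the JWS method defined above: JWS emits the general JWS JSON serialization, and ParseJWS reconstructs a JSONSignature from it. A sketch of the round trip, assuming the vendored import path; the helper name roundTripJWS is illustrative only:

package jwsexample

import "github.com/docker/libtrust"

// roundTripJWS serializes a signed JSONSignature to the standard JWS JSON
// form and parses it back, returning the IDs of the keys that verify it.
func roundTripJWS(js *libtrust.JSONSignature) ([]string, error) {
	jwsBytes, err := js.JWS() // errors if no signatures have been added
	if err != nil {
		return nil, err
	}
	parsed, err := libtrust.ParseJWS(jwsBytes)
	if err != nil {
		return nil, err
	}
	keys, err := parsed.Verify()
	if err != nil {
		return nil, err
	}
	ids := make([]string, len(keys))
	for i, k := range keys {
		ids[i] = k.KeyID()
	}
	return ids, nil
}
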
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
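The ParsePrettySignature function above and the PrettySignature method described next let a signature block be embedded in, and later recovered from, the signed document itself. A sketch under the same import-path assumption; the "signatures" key name here is a caller-chosen value, not anything the library mandates:

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// Sign a map and embed the signature block under a caller-chosen key.
	js, err := libtrust.NewJSONSignatureFromMap(map[string]interface{}{
		"name": "example/app",
	})
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		panic(err)
	}

	// The formatted document can be parsed and verified again.
	parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
	if err != nil {
		panic(err)
	}
	if _, err := parsed.Verify(); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pretty)
}
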
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) 
+	}
+
+	js.signatures = merged
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go
new file mode 100644
index 00000000..b4f26979
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go
@@ -0,0 +1,380 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io"
+	"testing"
+
+	"github.com/docker/libtrust/testutil"
+)
+
+func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) {
+	testMap := map[string]interface{}{
+		"name": "dmcgowan/mycontainer",
+		"config": map[string]interface{}{
+			"ports": []int{9101, 9102},
+			"run":   "/bin/echo \"Hello\"",
+		},
+		"layers": []string{
+			"2893c080-27f5-11e4-8c21-0800200c9a66",
+			"c54bc25b-fbb2-497b-a899-a8bc1b5b9d55",
+			"4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4",
+			"0b6da891-7f7f-4abf-9c97-7887549e696c",
+			"1d960389-ae4f-4011-85fd-18d0f96a67ad",
+		},
+	}
+	formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{`
+	formattedSection = fmt.Sprintf(formattedSection, sigKey)
+	if indent != "" {
+		buf := bytes.NewBuffer(nil)
+		json.Indent(buf, []byte(formattedSection), "", indent)
+		return testMap, buf.Bytes()
+	}
+	return testMap, []byte(formattedSection)
+
+}
+
+func TestSignJSON(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, _ := createTestJSON("buildSignatures", " ")
+	indented, err := json.MarshalIndent(testMap, "", " ")
+	if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+	}
+
+	js, err := NewJSONSignature(indented)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing content: %s", err)
+	}
+
+	keys, err := js.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("Error wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+
+}
+
+func TestSignMap(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, _ := createTestJSON("buildSignatures", " ")
+	js, err := NewJSONSignatureFromMap(testMap)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing JSON signature: %s", err)
+	}
+
+	keys, err := js.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("Error wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+}
+
+func TestFormattedJson(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, firstSection := createTestJSON("buildSignatures", " ")
+	indented, err := json.MarshalIndent(testMap, "", " ")
+	if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+	}
+
+	js, err := NewJSONSignature(indented)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing content: %s", err)
+	}
+
+	b, err := js.PrettySignature("buildSignatures")
+	if err != nil {
+		t.Fatalf("Error signing map: %s", err)
+	}
+
+	if bytes.Compare(b[:len(firstSection)], firstSection) != 0 {
+		t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
+	}
+
+	parsed, err := ParsePrettySignature(b, "buildSignatures")
+	if err != nil {
+		t.Fatalf("Error parsing formatted signature: %s", err)
+	}
+
+	keys, err := parsed.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("Error wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+
+	var unmarshalled map[string]interface{}
+	err = json.Unmarshal(b, &unmarshalled)
+	if err != nil {
+		t.Fatalf("Could not unmarshal after parse: %s", err)
+	}
+
+}
+
+func TestFormattedFlatJson(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, firstSection := createTestJSON("buildSignatures", "")
+	unindented, err := json.Marshal(testMap)
+	if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+	}
+
+	js, err := NewJSONSignature(unindented)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing JSON signature: %s", err)
+	}
+
+	b, err := js.PrettySignature("buildSignatures")
+	if err != nil {
+		t.Fatalf("Error signing map: %s", err)
+	}
+
+	if bytes.Compare(b[:len(firstSection)], firstSection) != 0 {
+		t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
+	}
+
+	parsed, err := ParsePrettySignature(b, "buildSignatures")
+	if err != nil {
+		t.Fatalf("Error parsing formatted signature: %s", err)
+	}
+
+	keys, err := parsed.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("Error wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+}
+
+func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) {
+	parent := ca
+	parentKey := key
+	chain := make([]*x509.Certificate, 6)
+	for i := 5; i > 0; i-- {
+		intermediatekey, err := GenerateECP256PrivateKey()
+		if err != nil {
+			t.Fatalf("Error generating key: %s", err)
+		}
+		chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+		if err != nil {
+			t.Fatalf("Error generating intermediate certificate: %s", err)
+		}
+		parent = chain[i]
+		parentKey = intermediatekey
+	}
+	trustKey, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+	if err != nil {
+		t.Fatalf("Error generating trust cert: %s", err)
+	}
+
+	return trustKey, chain
+}
+
+func TestChainVerify(t *testing.T) {
+	caKey, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+	if err != nil {
+		t.Fatalf("Error generating ca: %s", err)
+	}
+	trustKey, chain := generateTrustChain(t, caKey, ca)
+
+	testMap, _ := createTestJSON("verifySignatures", " ")
+	js, err := NewJSONSignatureFromMap(testMap)
+	if err != nil {
+
t.Fatalf("Error creating JSONSignature from map: %s", err) + } + + err = js.SignWithChain(trustKey, chain) + if err != nil { + t.Fatalf("Error signing with chain: %s", err) + } + + pool := x509.NewCertPool() + pool.AddCert(ca) + chains, err := js.VerifyChains(pool) + if err != nil { + t.Fatalf("Error verifying content: %s", err) + } + if len(chains) != 1 { + t.Fatalf("Unexpected chains length: %d", len(chains)) + } + if len(chains[0]) != 7 { + t.Fatalf("Unexpected chain length: %d", len(chains[0])) + } +} + +func TestInvalidChain(t *testing.T) { + caKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + trustKey, chain := generateTrustChain(t, caKey, ca) + + testMap, _ := createTestJSON("verifySignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSONSignature from map: %s", err) + } + + err = js.SignWithChain(trustKey, chain[:5]) + if err != nil { + t.Fatalf("Error signing with chain: %s", err) + } + + pool := x509.NewCertPool() + pool.AddCert(ca) + chains, err := js.VerifyChains(pool) + if err == nil { + t.Fatalf("Expected error verifying with bad chain") + } + if len(chains) != 0 { + t.Fatalf("Unexpected chains returned from invalid verify") + } +} + +func TestMergeSignatures(t *testing.T) { + pk1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key 1: %v", err) + } + + pk2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key 2: %v", err) + } + + payload := make([]byte, 1<<10) + if _, err = io.ReadFull(rand.Reader, payload); err != nil { + t.Fatalf("error generating payload: %v", err) + } + + payload, _ = json.Marshal(map[string]interface{}{"data": payload}) + + sig1, err := NewJSONSignature(payload) + if err != nil { + t.Fatalf("unexpected error creating signature 1: %v", err) + } + + if err := sig1.Sign(pk1); err != nil { + t.Fatalf("unexpected error signing with pk1: %v", err) + } + + sig2, err := NewJSONSignature(payload) + if err != nil { + t.Fatalf("unexpected error creating signature 2: %v", err) + } + + if err := sig2.Sign(pk2); err != nil { + t.Fatalf("unexpected error signing with pk2: %v", err) + } + + // Now, we actually merge into sig1 + if err := sig1.Merge(sig2); err != nil { + t.Fatalf("unexpected error merging: %v", err) + } + + // Verify the new signature package + pubkeys, err := sig1.Verify() + if err != nil { + t.Fatalf("unexpected error during verify: %v", err) + } + + // Make sure the pubkeys match the two private keys from before + privkeys := map[string]PrivateKey{ + pk1.KeyID(): pk1, + pk2.KeyID(): pk2, + } + + found := map[string]struct{}{} + + for _, pubkey := range pubkeys { + if _, ok := privkeys[pubkey.KeyID()]; !ok { + t.Fatalf("unexpected public key found during verification: %v", pubkey) + } + + found[pubkey.KeyID()] = struct{}{} + } + + // Make sure we've found all the private keys from verification + for keyid, _ := range privkeys { + if _, ok := found[keyid]; !ok { + t.Fatalf("public key %v not found during verification", keyid) + } + } + + // Create another signature, with a different payload, and ensure we get an error. 
+	sig3, err := NewJSONSignature([]byte("{}"))
+	if err != nil {
+		t.Fatalf("unexpected error making signature for sig3: %v", err)
+	}
+
+	if err := sig1.Merge(sig3); err == nil {
+		t.Fatalf("error expected during invalid merge with different payload")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/Godeps/_workspace/src/github.com/docker/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey.
Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { + switch cryptoPublicKey := cryptoPublicKey.(type) { + case *ecdsa.PublicKey: + return fromECPublicKey(cryptoPublicKey) + case *rsa.PublicKey: + return fromRSAPublicKey(cryptoPublicKey), nil + default: + return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) + } +} + +// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given +// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { + switch cryptoPrivateKey := cryptoPrivateKey.(type) { + case *ecdsa.PrivateKey: + return fromECPrivateKey(cryptoPrivateKey) + case *rsa.PrivateKey: + return fromRSAPrivateKey(cryptoPrivateKey), nil + default: + return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) + } +} + +// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust +// PublicKey or an error if there is a problem with the encoding. +func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. 
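Because MarshalJSON on these keys emits a JSON Web Key, a public key can be round-tripped through its JWK form with UnmarshalPublicKeyJWK. A brief sketch, again assuming the github.com/docker/libtrust import path:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// Serialize the public half as a JSON Web Key.
	jwkJSON, err := json.Marshal(key.PublicKey())
	if err != nil {
		panic(err)
	}

	// Recover a generic PublicKey from the JWK; the key ID is preserved.
	pub, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(pub.KeyID() == key.KeyID()) // true
}
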
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. +func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go new file mode 100644 index 00000000..c526de54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
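A sketch of the save/load cycle implemented by the functions below; the filename is a placeholder, and its extension is what selects the encoding:

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		panic(err)
	}

	// A ".json" or ".jwk" suffix selects JWK encoding; anything else is PEM.
	if err := libtrust.SaveKey("trust_key.pem", key); err != nil {
		panic(err)
	}
	loaded, err := libtrust.LoadKeyFile("trust_key.pem")
	if err != nil {
		panic(err)
	}
	fmt.Println(loaded.KeyID() == key.KeyID()) // true
}
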
+func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+ return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. 
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
new file mode 100644
index 00000000..57e691f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
@@ -0,0 +1,220 @@
+package libtrust
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func makeTempFile(t *testing.T, prefix string) (filename string) {
+	file, err := ioutil.TempFile("", prefix)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	filename = file.Name()
+	file.Close()
+
+	return
+}
+
+func TestKeyFiles(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testKeyFiles(t, key)
+
+	key, err = GenerateRSA2048PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testKeyFiles(t, key)
+}
+
+func testKeyFiles(t *testing.T, key PrivateKey) {
+	var err error
+
+	privateKeyFilename := makeTempFile(t, "private_key")
+	privateKeyFilenamePEM := privateKeyFilename + ".pem"
+	privateKeyFilenameJWK := privateKeyFilename + ".jwk"
+
+	publicKeyFilename := makeTempFile(t, "public_key")
+	publicKeyFilenamePEM := publicKeyFilename + ".pem"
+	publicKeyFilenameJWK := publicKeyFilename + ".jwk"
+
+	if err = SaveKey(privateKeyFilenamePEM, key); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = SaveKey(privateKeyFilenameJWK, key); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil {
+		t.Fatal(err)
+	}
+
+	loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if key.KeyID() != loadedPEMKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	if key.KeyID() != loadedJWKKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	if key.KeyID() != loadedPEMPublicKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	if key.KeyID() != loadedJWKPublicKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	os.Remove(privateKeyFilename)
+	os.Remove(privateKeyFilenamePEM)
+	os.Remove(privateKeyFilenameJWK)
+	os.Remove(publicKeyFilename)
+	os.Remove(publicKeyFilenamePEM)
+	os.Remove(publicKeyFilenameJWK)
+}
+
+func TestTrustedHostKeysFile(t *testing.T) {
+	trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys")
+	trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem"
+	trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json"
+
+	testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM)
+	testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK)
+
+	os.Remove(trustedHostKeysFilename)
+	os.Remove(trustedHostKeysFilenamePEM)
+
os.Remove(trustedHostKeysFilenameJWK) +} + +func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { + hostAddress1 := "docker.example.com:2376" + hostKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + hostKey1.AddExtendedField("hosts", []string{hostAddress1}) + err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) + if err != nil { + t.Fatal(err) + } + + for addr, hostKey := range trustedHostKeysMapping { + t.Logf("Host Address: %d\n", addr) + t.Logf("Host Key: %s\n\n", hostKey) + } + + hostAddress2 := "192.168.59.103:2376" + hostKey2, err := GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + hostKey2.AddExtendedField("hosts", hostAddress2) + err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) + if err != nil { + t.Fatal(err) + } + + for addr, hostKey := range trustedHostKeysMapping { + t.Logf("Host Address: %d\n", addr) + t.Logf("Host Key: %s\n\n", hostKey) + } + +} + +func TestTrustedClientKeysFile(t *testing.T) { + trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") + trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" + trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" + + testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) + testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK) + + os.Remove(trustedClientKeysFilename) + os.Remove(trustedClientKeysFilenamePEM) + os.Remove(trustedClientKeysFilenameJWK) +} + +func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { + clientKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) + if err != nil { + t.Fatal(err) + } + + for _, clientKey := range trustedClientKeys { + t.Logf("Client Key: %s\n", clientKey) + } + + clientKey2, err := GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) + if err != nil { + t.Fatal(err) + } + + for _, clientKey := range trustedClientKeys { + t.Logf("Client Key: %s\n", clientKey) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go new file mode 100644 index 00000000..9a98ae35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key PrivateKey + clientFile string + clientDir string + + clientLock sync.RWMutex + clients []PublicKey + + configLock sync.Mutex + configs []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files +// and managed by the given private key. 
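A sketch of wiring the manager into a server's TLS configuration; both paths are placeholders, and since the client CA pool is generated from the loaded keys, at least one authorized client key is assumed to be present:

package main

import (
	"crypto/tls"

	"github.com/docker/libtrust"
)

func main() {
	trustKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// Keys are loaded once from a bundle file plus a directory of
	// per-key files (no watching yet; see the TODO below).
	clients, err := libtrust.NewClientKeyManager(trustKey,
		"/etc/docker/authorized_keys.json", "/etc/docker/authorized_keys.d")
	if err != nil {
		panic(err)
	}

	// Registered configs receive a client CA pool built from those keys.
	tlsConfig := &tls.Config{ClientAuth: tls.RequireAndVerifyClientCert}
	if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
		panic(err)
	}
	_ = tlsConfig // hand to an http.Server or net listener as needed
}

NewIdentityAuthTLSConfig below bundles these steps together with self-signed server certificate generation.
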
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go new file mode 100644 index 00000000..f6c59cc4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go @@ -0,0 +1,80 @@ +package libtrust + +import ( + "testing" +) + +type generateFunc func() (PrivateKey, error) + +func runGenerateBench(b *testing.B, f generateFunc, name string) { + for i := 0; i < b.N; i++ { + _, err := f() + if err != nil { + b.Fatalf("Error generating %s: %s", name, err) + } + } +} + +func runFingerprintBench(b *testing.B, f generateFunc, name string) { + b.StopTimer() + // Don't count this relatively slow generation call. + key, err := f() + if err != nil { + b.Fatalf("Error generating %s: %s", name, err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if key.KeyID() == "" { + b.Fatalf("Error generating key ID for %s", name) + } + } +} + +func BenchmarkECP256Generate(b *testing.B) { + runGenerateBench(b, GenerateECP256PrivateKey, "P256") +} + +func BenchmarkECP384Generate(b *testing.B) { + runGenerateBench(b, GenerateECP384PrivateKey, "P384") +} + +func BenchmarkECP521Generate(b *testing.B) { + runGenerateBench(b, GenerateECP521PrivateKey, "P521") +} + +func BenchmarkRSA2048Generate(b *testing.B) { + runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048") +} + +func BenchmarkRSA3072Generate(b *testing.B) { + runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072") +} + +func BenchmarkRSA4096Generate(b *testing.B) { + runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096") +} + +func BenchmarkECP256Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateECP256PrivateKey, "P256") +} + +func BenchmarkECP384Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateECP384PrivateKey, "P384") +} + +func BenchmarkECP521Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateECP521PrivateKey, "P521") +} + +func BenchmarkRSA2048Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048") +} + +func BenchmarkRSA3072Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072") +} + +func BenchmarkRSA4096Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096") +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go new file mode 100644 index 00000000..dac4cacf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + 
"crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. 
+ nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. If the specified hashing algorithm is supported by +// this key, that hash function is used to generate the signature otherwise the +// the default hashing algorithm for this key is used. Returns the signature +// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", +// "RS512". +func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) + hasher := sigAlg.HashID().New() + + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + alg = sigAlg.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *rsaPrivateKey) toMap() map[string]interface{} { + k.Precompute() // Make sure the precomputed values are stored. 
+ jwk := k.rsaPublicKey.toMap() + + jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) + jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) + jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) + jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) + jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) + jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) + + otherPrimes := k.Primes[2:] + + if len(otherPrimes) > 0 { + otherPrimesInfo := make([]interface{}, len(otherPrimes)) + for i, r := range otherPrimes { + otherPrimeInfo := make(map[string]string, 3) + otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) + crtVal := k.Precomputed.CRTValues[i] + otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) + otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) + otherPrimesInfo[i] = otherPrimeInfo + } + jwk["oth"] = otherPrimesInfo + } + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // we're going to go ahead and not choose to accept it without the extra + // fields. Only the 'oth' field will be optional (for multi-prime keys). + privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. 
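The `Sign`/`Verify` methods above are thin wrappers over PKCS#1 v1.5: hash the input stream, then sign or verify the digest. A minimal sketch of that two-step flow using only the standard library (nothing libtrust-specific is assumed):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Hash first, then sign the digest: the same flow used by
	// rsaPrivateKey.Sign above (which hashes an io.Reader instead).
	digest := sha256.Sum256([]byte("Hello, World!"))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}

	// rsaPublicKey.Verify performs the matching check.
	if err := rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, digest[:], sig); err != nil {
		panic(err)
	}
	fmt.Println("RS256-style signature verified")
}
```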
+ publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + crtValue := crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. 
+func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go new file mode 100644 index 00000000..5ec7707a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go @@ -0,0 +1,157 @@ +package libtrust + +import ( + "bytes" + "encoding/json" + "log" + "testing" +) + +var rsaKeys []PrivateKey + +func init() { + var err error + rsaKeys, err = generateRSATestKeys() + if err != nil { + log.Fatal(err) + } +} + +func generateRSATestKeys() (keys []PrivateKey, err error) { + log.Println("Generating RSA 2048-bit Test Key") + rsa2048Key, err := GenerateRSA2048PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 3072-bit Test Key") + rsa3072Key, err := GenerateRSA3072PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 4096-bit Test Key") + rsa4096Key, err := GenerateRSA4096PrivateKey() + if err != nil { + return + } + + log.Println("Done generating RSA Test Keys!") + keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} + + return +} + +func TestRSAKeys(t *testing.T) { + for _, rsaKey := range rsaKeys { + if rsaKey.KeyType() != "RSA" { + t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) + } + } +} + +func TestRSASignVerify(t *testing.T) { + message := "Hello, World!" + data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = rsaKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalRSAKeys(t *testing.T) { + data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys. + data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + + // It's a good idea to validate the Private Key to make sure our + // (un)marshal process didn't corrupt the extra parameters. 
+ k := privKey2.(*rsaPrivateKey) + err = k.PrivateKey.Validate() + if err != nil { + t.Fatal(err) + } + } +} + +func TestFromCryptoRSAKeys(t *testing.T) { + for _, rsaKey := range rsaKeys { + cryptoPrivateKey := rsaKey.CryptoPrivateKey() + cryptoPublicKey := rsaKey.CryptoPublicKey() + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + t.Fatal(err) + } + + if pubKey.KeyID() != rsaKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + + privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) + if err != nil { + t.Fatal(err) + } + + if privKey.KeyID() != rsaKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go new file mode 100644 index 00000000..89debf6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go @@ -0,0 +1,94 @@ +package testutil + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" +) + +// GenerateTrustCA generates a new certificate authority for testing. +func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "CA Root", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} + +// GenerateIntermediate generates an intermediate certificate for testing using +// the parent certificate (likely a CA) and the provided keys. +func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "Intermediate", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} + +// GenerateTrustCert generates a new trust certificate for testing. Unlike the +// intermediate certificates, this certificate should be used for signature +// only, not creating certificates. 
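`GenerateTrustCert` below completes the testutil helpers; taken together with `GenerateTrustCA` they can mint a tiny test PKI. A hedged usage sketch under the signatures defined in this file, assuming plain `crypto/ecdsa` keys for brevity:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/docker/libtrust/testutil"
)

func main() {
	caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Self-signed root: the CA certificate signs itself.
	ca, err := testutil.GenerateTrustCA(caKey.Public(), caKey)
	if err != nil {
		log.Fatal(err)
	}

	// Leaf signed by the CA: public key to certify, parent key, parent cert.
	leaf, err := testutil.GenerateTrustCert(leafKey.Public(), caKey, ca)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("issued %q, signed by %q", leaf.Subject.CommonName, ca.Subject.CommonName)
}
```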
+func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "Trust Cert",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
new file mode 100644
index 00000000..24124db2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
@@ -0,0 +1,50 @@
+## Libtrust TLS Config Demo
+
+This program generates key pairs and trust files for a TLS client and server.
+
+To generate the keys, run:
+
+```
+$ go run genkeys.go
+```
+
+The generated files are:
+
+```
+$ ls -l client_data/ server_data/
+client_data/:
+total 24
+-rw-------  1 jlhawn  staff  281 Aug  8 16:21 private_key.json
+-rw-r--r--  1 jlhawn  staff  225 Aug  8 16:21 public_key.json
+-rw-r--r--  1 jlhawn  staff  275 Aug  8 16:21 trusted_hosts.json
+
+server_data/:
+total 24
+-rw-r--r--  1 jlhawn  staff  348 Aug  8 16:21 trusted_clients.json
+-rw-------  1 jlhawn  staff  281 Aug  8 16:21 private_key.json
+-rw-r--r--  1 jlhawn  staff  225 Aug  8 16:21 public_key.json
+```
+
+The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<HOST>:<PORT>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client.
+
+To start the server, run:
+
+```
+$ go run server.go
+```
+
+This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
+
+To make a request using the client, run:
+
+```
+$ go run client.go
+```
+
+This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
+
+The file `gencert.go` can be used to generate a PEM-encoded version of the client key and certificate. If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running).
+
+```
+curl --cert cert.pem --key key.pem -k https://localhost:8888
+```
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go
new file mode 100644
index 00000000..0a699a0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/docker/libtrust"
+)
+
+var (
+	serverAddress        = "localhost:8888"
+	privateKeyFilename   = "client_data/private_key.pem"
+	trustedHostsFilename = "client_data/trusted_hosts.pem"
+)
+
+func main() {
+	// Load Client Key.
+	clientKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate Client Certificate.
+	selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Load trusted host keys.
+	hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Ensure the host we want to connect to is trusted!
+	host, _, err := net.SplitHostPort(serverAddress)
+	if err != nil {
+		log.Fatal(err)
+	}
+	serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false)
+	if err != nil {
+		log.Fatalf("%q is not a known and trusted host", host)
+	}
+
+	// Generate a CA pool with the trusted host's key.
+	caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create HTTP Client.
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{
+				Certificates: []tls.Certificate{
+					tls.Certificate{
+						Certificate: [][]byte{selfSignedClientCert.Raw},
+						PrivateKey:  clientKey.CryptoPrivateKey(),
+						Leaf:        selfSignedClientCert,
+					},
+				},
+				RootCAs: caPool,
+			},
+		},
+	}
+
+	var makeRequest = func(url string) {
+		resp, err := client.Get(url)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer resp.Body.Close()
+
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		log.Println(resp.Status)
+		log.Println(string(body))
+	}
+
+	// Make the request to the trusted server!
+ makeRequest(fmt.Sprintf("https://%s", serverAddress)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go new file mode 100644 index 00000000..c65f3b6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go @@ -0,0 +1,62 @@ +package main + +import ( + "encoding/pem" + "fmt" + "log" + "net" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + clientPrivateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) + if err != nil { + log.Fatal(err) + } + + keyPEMBlock, err := key.PEMBlock() + if err != nil { + log.Fatal(err) + } + + encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) + fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) + + cert, err := libtrust.GenerateSelfSignedClientCert(key) + if err != nil { + log.Fatal(err) + } + + encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) + + trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + hostname, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + + trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) + if err != nil { + log.Fatal(err) + } + + caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) + if err != nil { + log.Fatal(err) + } + + encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) + fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go new file mode 100644 index 00000000..9dc8842a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go @@ -0,0 +1,61 @@ +package main + +import ( + "log" + + "github.com/docker/libtrust" +) + +func main() { + // Generate client key. + clientKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Add a comment for the client key. + clientKey.AddExtendedField("comment", "TLS Demo Client") + + // Save the client key, public and private versions. + err = libtrust.SaveKey("client_data/private_key.pem", clientKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate server key. + serverKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Set the list of addresses to use for the server. + serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) + + // Save the server key, public and private versions. + err = libtrust.SaveKey("server_data/private_key.pem", serverKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Authorized Keys file for server. + err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Known Host Keys file for client. 
+	err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
new file mode 100644
index 00000000..d3cb2ea9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
@@ -0,0 +1,80 @@
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"html"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/docker/libtrust"
+)
+
+var (
+	serverAddress             = "localhost:8888"
+	privateKeyFilename        = "server_data/private_key.pem"
+	authorizedClientsFilename = "server_data/trusted_clients.pem"
+)
+
+func requestHandler(w http.ResponseWriter, r *http.Request) {
+	clientCert := r.TLS.PeerCertificates[0]
+	keyID := clientCert.Subject.CommonName
+	log.Printf("Request from keyID: %s\n", keyID)
+	fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
+}
+
+func main() {
+	// Load server key.
+	serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate server certificate.
+	selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
+		serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Load authorized client keys.
+	authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create CA pool using trusted client keys.
+	caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create TLS config, requiring client certificates.
+	tlsConfig := &tls.Config{
+		Certificates: []tls.Certificate{
+			tls.Certificate{
+				Certificate: [][]byte{selfSignedServerCert.Raw},
+				PrivateKey:  serverKey.CryptoPrivateKey(),
+				Leaf:        selfSignedServerCert,
+			},
+		},
+		ClientAuth: tls.RequireAndVerifyClientCert,
+		ClientCAs:  caPool,
+	}
+
+	// Create HTTP server with simple request handler.
+	server := &http.Server{
+		Addr:    serverAddress,
+		Handler: http.HandlerFunc(requestHandler),
+	}
+
+	// Listen and serve HTTPS using the libtrust TLS config.
+	listener, err := net.Listen("tcp", server.Addr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	tlsListener := tls.NewListener(listener, tlsConfig)
+	server.Serve(tlsListener)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
new file mode 100644
index 00000000..72b0fc36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
@@ -0,0 +1,50 @@
+package trustgraph
+
+import "github.com/docker/libtrust"
+
+// TrustGraph represents a graph of authorization mapping
+// public keys to nodes and grants between nodes.
+type TrustGraph interface {
+	// Verify reports whether the given public key is allowed to perform
+	// the given action on the given node according to the trust
+	// graph.
+	Verify(libtrust.PublicKey, string, uint16) (bool, error)
+
+	// GetGrants returns an array of all grant chains which are used to
+	// allow the requested permission.
+	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
+}
+
+// Grant represents a transfer of permission from one part of the
+// trust graph to another. This is the only way to delegate
+// permission between two different subtrees in the graph.
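The `Permission` field on the `Grant` type defined just below is the bit map documented in the comment block at the end of this file. For orientation, the masks compose like this (constants reproduced purely for illustration; they are not exported by this package):

```go
package main

import "fmt"

// Permission bits as documented at the end of graph.go.
const (
	ReadNode     uint16 = 0x01
	WriteNode    uint16 = 0x02
	ReadSubtree  uint16 = 0x04
	WriteSubtree uint16 = 0x08

	ReadAccess  = ReadNode | WriteNode | ReadSubtree                // 0x07
	WriteAccess = ReadNode | WriteNode | ReadSubtree | WriteSubtree // 0x0f
)

func main() {
	granted := ReadAccess
	requested := WriteAccess

	// The walk in memory_graph.go only follows a grant when every requested
	// bit is present: grant.Permission&permission == permission.
	fmt.Println(granted&requested == requested) // false: the 0x08 bit is missing
	fmt.Println(granted&ReadNode == ReadNode)   // true
}
```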
+type Grant struct { + // Subject is the namespace being granted + Subject string + + // Permissions is a bit map of permissions + Permission uint16 + + // Grantee represents the node being granted + // a permission scope. The grantee can be + // either a namespace item or a key id where namespace + // items will always start with a '/'. + Grantee string + + // statement represents the statement used to create + // this object. + statement *Statement +} + +// Permissions +// Read node 0x01 (can read node, no sub nodes) +// Write node 0x02 (can write to node object, cannot create subnodes) +// Read subtree 0x04 (delegates read to each sub node) +// Write subtree 0x08 (delegates write to each sub node, included create on the subject) +// +// Permission shortcuts +// ReadItem = 0x01 +// WriteItem = 0x03 +// ReadAccess = 0x07 +// WriteAccess = 0x0F +// Delegate = 0x0F diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go new file mode 100644 index 00000000..247bfa7a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go @@ -0,0 +1,133 @@ +package trustgraph + +import ( + "strings" + + "github.com/docker/libtrust" +) + +type grantNode struct { + grants []*Grant + children map[string]*grantNode +} + +type memoryGraph struct { + roots map[string]*grantNode +} + +func newGrantNode() *grantNode { + return &grantNode{ + grants: []*Grant{}, + children: map[string]*grantNode{}, + } +} + +// NewMemoryGraph returns a new in memory trust graph created from +// a static list of grants. This graph is immutable after creation +// and any alterations should create a new instance. +func NewMemoryGraph(grants []*Grant) TrustGraph { + roots := map[string]*grantNode{} + for _, grant := range grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + if part != "" { + node.grants = append(node.grants, grant) + } + nodes = node.children + } + } + return &memoryGraph{roots} +} + +func (g *memoryGraph) getGrants(name string) []*Grant { + nameParts := strings.Split(name, "/") + nodes := g.roots + var node *grantNode + var nodeOk bool + for _, part := range nameParts { + node, nodeOk = nodes[part] + if !nodeOk { + return nil + } + nodes = node.children + } + return node.grants +} + +func isSubName(name, sub string) bool { + if strings.HasPrefix(name, sub) { + if len(name) == len(sub) || name[len(sub)] == '/' { + return true + } + } + return false +} + +type walkFunc func(*Grant, []*Grant) bool + +func foundWalkFunc(*Grant, []*Grant) bool { + return true +} + +func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { + if visited == nil { + visited = map[*Grant]bool{} + } + grants := g.getGrants(start) + subGrants := make([]*Grant, 0, len(grants)) + for _, grant := range grants { + if visited[grant] { + continue + } + visited[grant] = true + if grant.Permission&permission == permission { + if isSubName(target, grant.Subject) { + if f(grant, chain) { + return true + } + } else { + subGrants = append(subGrants, grant) + } + } + } + for _, grant := range subGrants { + var chainCopy []*Grant + if collect { + chainCopy = make([]*Grant, len(chain)+1) + copy(chainCopy, chain) + chainCopy[len(chainCopy)-1] = 
grant + } else { + chainCopy = nil + } + + if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { + return true + } + } + return false +} + +func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { + return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil +} + +func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { + grants := [][]*Grant{} + collect := func(grant *Grant, chain []*Grant) bool { + grantChain := make([]*Grant, len(chain)+1) + copy(grantChain, chain) + grantChain[len(grantChain)-1] = grant + grants = append(grants, grantChain) + return false + } + g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) + return grants, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go new file mode 100644 index 00000000..49fd0f3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go @@ -0,0 +1,174 @@ +package trustgraph + +import ( + "fmt" + "testing" + + "github.com/docker/libtrust" +) + +func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { + grants := make([]*Grant, count) + keys := make([]libtrust.PrivateKey, count) + for i := 0; i < count; i++ { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + grant := &Grant{ + Subject: fmt.Sprintf("/user-%d", i+1), + Permission: 0x0f, + Grantee: pk.KeyID(), + } + keys[i] = pk + grants[i] = grant + } + return grants, keys +} + +func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if !ok { + t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if ok { + t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func TestVerify(t *testing.T) { + grants, keys := createTestKeysAndGrants(4) + extraGrants := make([]*Grant, 3) + extraGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-4", + Permission: 0x07, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) 
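The tests here exercise the graph through helpers; stripped to essentials, building and querying a memory graph looks like the following sketch, which uses only the exported API defined in this package (the key is freshly generated, and `/project` is an arbitrary example namespace):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
	"github.com/docker/libtrust/trustgraph"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// A single grant: this key gets full permissions (0x0f) on /project.
	g := trustgraph.NewMemoryGraph([]*trustgraph.Grant{
		{Subject: "/project", Permission: 0x0f, Grantee: key.KeyID()},
	})

	// /project delegates to its subtree, so /project/app verifies too.
	ok, err := g.Verify(key.PublicKey(), "/project/app", 0x0f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok) // true
}
```

`GetGrants` answers the same question but returns the matching delegation chains instead of a boolean, as `TestGetGrants` demonstrates.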
+ + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) + testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) +} + +func TestCircularWalk(t *testing.T) { + grants, keys := createTestKeysAndGrants(3) + user1Grant := &Grant{ + Subject: "/user-2", + Permission: 0x0f, + Grantee: "/user-1", + } + user2Grant := &Grant{ + Subject: "/user-1", + Permission: 0x0f, + Grantee: "/user-2", + } + grants = append(grants, user1Grant, user2Grant) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) +} + +func assertGrantSame(t *testing.T, actual, expected *Grant) { + if actual != expected { + t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual) + } +} + +func TestGetGrants(t *testing.T) { + grants, keys := createTestKeysAndGrants(5) + extraGrants := make([]*Grant, 4) + extraGrants[0] = &Grant{ + Subject: "/user-3/friend-project", + Permission: 0x0f, + Grantee: "/user-2/friends", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-2/friends", + Permission: 0x0f, + Grantee: "/user-5/fun-project", + } + extraGrants[3] = &Grant{ + Subject: "/user-5/fun-project", + Permission: 0x0f, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) 
+ + g := NewMemoryGraph(grants) + + grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f) + if err != nil { + t.Fatalf("Error getting grants: %s", err) + } + if len(grantChains) != 1 { + t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) + } + if len(grantChains[0]) != 2 { + t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) + } + assertGrantSame(t, grantChains[0][0], grants[3]) + assertGrantSame(t, grantChains[0][1], extraGrants[1]) + + grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f) + if err != nil { + t.Fatalf("Error getting grants: %s", err) + } + if len(grantChains) != 1 { + t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) + } + if len(grantChains[0]) != 4 { + t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) + } + assertGrantSame(t, grantChains[0][0], grants[0]) + assertGrantSame(t, grantChains[0][1], extraGrants[3]) + assertGrantSame(t, grantChains[0][2], extraGrants[2]) + assertGrantSame(t, grantChains[0][3], extraGrants[0]) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go new file mode 100644 index 00000000..7a74b553 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go @@ -0,0 +1,227 @@ +package trustgraph + +import ( + "crypto/x509" + "encoding/json" + "io" + "io/ioutil" + "sort" + "strings" + "time" + + "github.com/docker/libtrust" +) + +type jsonGrant struct { + Subject string `json:"subject"` + Permission uint16 `json:"permission"` + Grantee string `json:"grantee"` +} + +type jsonRevocation struct { + Subject string `json:"subject"` + Revocation uint16 `json:"revocation"` + Grantee string `json:"grantee"` +} + +type jsonStatement struct { + Revocations []*jsonRevocation `json:"revocations"` + Grants []*jsonGrant `json:"grants"` + Expiration time.Time `json:"expiration"` + IssuedAt time.Time `json:"issuedAt"` +} + +func (g *jsonGrant) Grant(statement *Statement) *Grant { + return &Grant{ + Subject: g.Subject, + Permission: g.Permission, + Grantee: g.Grantee, + statement: statement, + } +} + +// Statement represents a set of grants made from a verifiable +// authority. A statement has an expiration associated with it +// set by the authority. +type Statement struct { + jsonStatement + + signature *libtrust.JSONSignature +} + +// IsExpired returns whether the statement has expired +func (s *Statement) IsExpired() bool { + return s.Expiration.Before(time.Now().Add(-10 * time.Second)) +} + +// Bytes returns an indented json representation of the statement +// in a byte array. This value can be written to a file or stream +// without alteration. +func (s *Statement) Bytes() ([]byte, error) { + return s.signature.PrettySignature("signatures") +} + +// LoadStatement loads and verifies a statement from an input stream. 
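`LoadStatement` below is the inverse of `CreateStatement` further down: a statement is signed JSON that can be serialized with `Bytes` and verified against a CA pool on the way back in. A hedged round-trip sketch, assuming a one-link chain built with the testutil helpers (mirroring `generateTrustChain` in the tests, with the CA directly signing the statement key):

```go
package main

import (
	"bytes"
	"crypto/x509"
	"fmt"
	"log"
	"time"

	"github.com/docker/libtrust"
	"github.com/docker/libtrust/testutil"
	"github.com/docker/libtrust/trustgraph"
)

func main() {
	caKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
	if err != nil {
		log.Fatal(err)
	}
	trustKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	cert, err := testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), caKey.CryptoPrivateKey(), ca)
	if err != nil {
		log.Fatal(err)
	}

	// Grants and revocations are supplied as JSON arrays.
	grants := bytes.NewReader([]byte(`[{"subject": "/user-2", "permission": 15, "grantee": "/user-1"}]`))
	revocations := bytes.NewReader([]byte(`[]`))

	st, err := trustgraph.CreateStatement(grants, revocations, time.Hour, trustKey, []*x509.Certificate{cert})
	if err != nil {
		log.Fatal(err)
	}
	b, err := st.Bytes()
	if err != nil {
		log.Fatal(err)
	}

	// Round trip: parse and verify the signature chain against the CA.
	pool := x509.NewCertPool()
	pool.AddCert(ca)
	verified, err := trustgraph.LoadStatement(bytes.NewReader(b), pool)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(verified.Grants)) // 1
}
```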
+func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + js, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return nil, err + } + payload, err := js.Payload() + if err != nil { + return nil, err + } + var statement Statement + err = json.Unmarshal(payload, &statement.jsonStatement) + if err != nil { + return nil, err + } + + if authority == nil { + _, err = js.Verify() + if err != nil { + return nil, err + } + } else { + _, err = js.VerifyChains(authority) + if err != nil { + return nil, err + } + } + statement.signature = js + + return &statement, nil +} + +// CreateStatements creates and signs a statement from a stream of grants +// and revocations in a JSON array. +func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { + var statement Statement + err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants) + if err != nil { + return nil, err + } + err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations) + if err != nil { + return nil, err + } + statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration) + statement.jsonStatement.IssuedAt = time.Now().UTC() + + b, err := json.MarshalIndent(&statement.jsonStatement, "", " ") + if err != nil { + return nil, err + } + + statement.signature, err = libtrust.NewJSONSignature(b) + if err != nil { + return nil, err + } + err = statement.signature.SignWithChain(key, chain) + if err != nil { + return nil, err + } + + return &statement, nil +} + +type statementList []*Statement + +func (s statementList) Len() int { + return len(s) +} + +func (s statementList) Less(i, j int) bool { + return s[i].IssuedAt.Before(s[j].IssuedAt) +} + +func (s statementList) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// CollapseStatements returns a single list of the valid statements as well as the +// time when the next grant will expire. +func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) { + sorted := make(statementList, 0, len(statements)) + for _, statement := range statements { + if useExpired || !statement.IsExpired() { + sorted = append(sorted, statement) + } + } + sort.Sort(sorted) + + var minExpired time.Time + var grantCount int + roots := map[string]*grantNode{} + for i, statement := range sorted { + if statement.Expiration.Before(minExpired) || i == 0 { + minExpired = statement.Expiration + } + for _, grant := range statement.Grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + g := grant.Grant(statement) + grantCount = grantCount + 1 + + for _, part := range parts { + node, nodeOk := nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + node.grants = append(node.grants, g) + nodes = node.children + } + } + + for _, revocation := range statement.Revocations { + parts := strings.Split(revocation.Grantee, "/") + nodes := roots + + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + break + } + nodes = node.children + } + if node != nil { + for _, grant := range node.grants { + if isSubName(grant.Subject, revocation.Subject) { + grant.Permission = grant.Permission &^ revocation.Revocation + } + } + } + } + } + + retGrants := make([]*Grant, 0, grantCount) + for _, rootNodes := range roots { + retGrants = append(retGrants, rootNodes.grants...) 
+ } + + return retGrants, minExpired, nil +} + +// FilterStatements filters the statements to statements including the given grants. +func FilterStatements(grants []*Grant) ([]*Statement, error) { + statements := map[*Statement]bool{} + for _, grant := range grants { + if grant.statement != nil { + statements[grant.statement] = true + } + } + retStatements := make([]*Statement, len(statements)) + var i int + for statement := range statements { + retStatements[i] = statement + i++ + } + return retStatements, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go new file mode 100644 index 00000000..e5094686 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go @@ -0,0 +1,417 @@ +package trustgraph + +import ( + "bytes" + "crypto/x509" + "encoding/json" + "testing" + "time" + + "github.com/docker/libtrust" + "github.com/docker/libtrust/testutil" +) + +const testStatementExpiration = time.Hour * 5 + +func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { + var statement Statement + + statement.Grants = make([]*jsonGrant, len(grants)) + for i, grant := range grants { + statement.Grants[i] = &jsonGrant{ + Subject: grant.Subject, + Permission: grant.Permission, + Grantee: grant.Grantee, + } + } + statement.IssuedAt = time.Now() + statement.Expiration = time.Now().Add(testStatementExpiration) + statement.Revocations = make([]*jsonRevocation, 0) + + marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ") + if err != nil { + return nil, err + } + + sig, err := libtrust.NewJSONSignature(marshalled) + if err != nil { + return nil, err + } + err = sig.SignWithChain(key, chain) + if err != nil { + return nil, err + } + statement.signature = sig + + return &statement, nil +} + +func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) { + caKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + + parent := ca + parentKey := caKey + chain := make([]*x509.Certificate, chainLen) + for i := chainLen - 1; i > 0; i-- { + intermediatekey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generating intermdiate certificate: %s", err) + } + parent = chain[i] + parentKey = intermediatekey + } + trustKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generate trust cert: %s", err) + } + + caPool := x509.NewCertPool() + caPool.AddCert(ca) + + return trustKey, caPool, chain +} + +func TestLoadStatement(t *testing.T) { + grantCount := 4 + grants, _ := createTestKeysAndGrants(grantCount) + + trustKey, caPool, chain := generateTrustChain(t, 6) + + statement, err := generateStatement(grants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + + statementBytes, err := 
statement.Bytes() + if err != nil { + t.Fatalf("Error getting statement bytes: %s", err) + } + + s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + if len(s2.Grants) != grantCount { + t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) + } + + pool := x509.NewCertPool() + _, err = LoadStatement(bytes.NewReader(statementBytes), pool) + if err == nil { + t.Fatalf("No error thrown verifying without an authority") + } else if _, ok := err.(x509.UnknownAuthorityError); !ok { + t.Fatalf("Unexpected error verifying without authority: %s", err) + } + + s2, err = LoadStatement(bytes.NewReader(statementBytes), nil) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + if len(s2.Grants) != grantCount { + t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) + } + + badData := make([]byte, len(statementBytes)) + copy(badData, statementBytes) + badData[0] = '[' + _, err = LoadStatement(bytes.NewReader(badData), nil) + if err == nil { + t.Fatalf("No error thrown parsing bad json") + } + + alteredData := make([]byte, len(statementBytes)) + copy(alteredData, statementBytes) + alteredData[30] = '0' + _, err = LoadStatement(bytes.NewReader(alteredData), nil) + if err == nil { + t.Fatalf("No error thrown from bad data") + } +} + +func TestCollapseGrants(t *testing.T) { + grantCount := 8 + grants, keys := createTestKeysAndGrants(grantCount) + linkGrants := make([]*Grant, 4) + linkGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + linkGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + linkGrants[2] = &Grant{ + Subject: "/user-6", + Permission: 0x0f, + Grantee: "/user-7", + } + linkGrants[3] = &Grant{ + Subject: "/user-6/sub-project/specific-app", + Permission: 0x0f, + Grantee: "/user-5", + } + trustKey, pool, chain := generateTrustChain(t, 3) + + statements := make([]*Statement, 3) + var err error + statements[0], err = generateStatement(grants[0:4], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[1], err = generateStatement(grants[4:], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[2], err = generateStatement(linkGrants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + + statementsCopy := make([]*Statement, len(statements)) + for i, statement := range statements { + b, err := statement.Bytes() + if err != nil { + t.Fatalf("Error getting statement bytes: %s", err) + } + verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + // Force sort by reversing order + statementsCopy[len(statementsCopy)-i-1] = verifiedStatement + } + statements = statementsCopy + + collapsedGrants, expiration, err := CollapseStatements(statements, false) + if len(collapsedGrants) != 12 { + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) + } + if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { + t.Fatalf("Unexpected expiration time: %s", expiration.String()) + } + g := NewMemoryGraph(collapsedGrants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + 
testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) + testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f) + testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f) + testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f) + testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) + testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f) + testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) + testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f) + + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f) + testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f) + + // Add revocation grant + statements = append(statements, &Statement{ + jsonStatement{ + IssuedAt: time.Now(), + Expiration: time.Now().Add(testStatementExpiration), + Grants: []*jsonGrant{}, + Revocations: []*jsonRevocation{ + &jsonRevocation{ + Subject: "/user-1", + Revocation: 0x0f, + Grantee: keys[0].KeyID(), + }, + &jsonRevocation{ + Subject: "/user-2", + Revocation: 0x08, + Grantee: keys[1].KeyID(), + }, + &jsonRevocation{ + Subject: "/user-6", + Revocation: 0x0f, + Grantee: "/user-7", + }, + &jsonRevocation{ + Subject: "/user-9", + Revocation: 0x0f, + Grantee: "/user-10", + }, + }, + }, + nil, + }) + + collapsedGrants, expiration, err = CollapseStatements(statements, false) + if len(collapsedGrants) != 12 { + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) + } + if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { + t.Fatalf("Unexpected expiration time: %s", expiration.String()) + } + g = NewMemoryGraph(collapsedGrants) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) + + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) +} + +func TestFilterStatements(t *testing.T) { + grantCount := 8 + grants, keys := createTestKeysAndGrants(grantCount) + linkGrants := make([]*Grant, 3) + linkGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + linkGrants[1] = &Grant{ + Subject: "/user-5", + Permission: 0x0f, + Grantee: "/user-4", + } + linkGrants[2] = &Grant{ + Subject: "/user-7", + Permission: 0x0f, + Grantee: "/user-6", + } + + trustKey, _, chain := generateTrustChain(t, 3) + + statements := make([]*Statement, 5) + var err error + statements[0], err = generateStatement(grants[0:2], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[1], err = generateStatement(grants[2:4], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[2], err = generateStatement(grants[4:6], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + 
statements[3], err = generateStatement(grants[6:], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[4], err = generateStatement(linkGrants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + collapsed, _, err := CollapseStatements(statements, false) + if err != nil { + t.Fatalf("Error collapsing grants: %s", err) + } + + // Filter 1, all 5 statements + filter1, err := FilterStatements(collapsed) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter1) != 5 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) + } + + // Filter 2, one statement + filter2, err := FilterStatements([]*Grant{collapsed[0]}) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter2) != 1 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) + } + + // Filter 3, 2 statements, from graph lookup + g := NewMemoryGraph(collapsed) + lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) + if err != nil { + t.Fatalf("Error looking up grants: %s", err) + } + if len(lookupGrants) != 1 { + t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) + } + if len(lookupGrants[0]) != 2 { + t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants)) + } + filter3, err := FilterStatements(lookupGrants[0]) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter3) != 2 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) + } + +} + +func TestCreateStatement(t *testing.T) { + grantJSON := bytes.NewReader([]byte(`[ + { + "subject": "/user-2", + "permission": 15, + "grantee": "/user-1" + }, + { + "subject": "/user-7", + "permission": 1, + "grantee": "/user-9" + }, + { + "subject": "/user-3", + "permission": 15, + "grantee": "/user-2" + } +]`)) + revocationJSON := bytes.NewReader([]byte(`[ + { + "subject": "user-8", + "revocation": 12, + "grantee": "user-9" + } +]`)) + + trustKey, pool, chain := generateTrustChain(t, 3) + + statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) + if err != nil { + t.Fatalf("Error creating statement: %s", err) + } + + b, err := statement.Bytes() + if err != nil { + t.Fatalf("Error retrieving bytes: %s", err) + } + + verified, err := LoadStatement(bytes.NewReader(b), pool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + + if len(verified.Grants) != 3 { + t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) + } + + if len(verified.Revocations) != 1 { + t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/Godeps/_workspace/src/github.com/docker/libtrust/util.go new file mode 100644 index 00000000..d88176cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { 
+ if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. +func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to 
connect. unknown host: %s", addr)
+			}
+		}
+	}
+
+	testConn.Close()
+	tlsConfig.InsecureSkipVerify = false
+
+	return tlsConfig, nil
+}
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+	s = strings.Replace(s, "\n", "", -1)
+	s = strings.Replace(s, " ", "", -1)
+	switch len(s) % 4 {
+	case 0:
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	default:
+		return nil, errors.New("illegal base64url string")
+	}
+	return base64.URLEncoding.DecodeString(s)
+}
+
+func keyIDEncode(b []byte) string {
+	s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
+	var buf bytes.Buffer
+	var i int
+	for i = 0; i < len(s)/4-1; i++ {
+		start := i * 4
+		end := start + 4
+		buf.WriteString(s[start:end] + ":")
+	}
+	buf.WriteString(s[i*4:])
+	return buf.String()
+}
+
+func keyIDFromCryptoKey(pubKey PublicKey) string {
+	// Generate and return a 'libtrust' fingerprint of the public key.
+	// For an RSA key this should be:
+	//   SHA256(DER encoded ASN1)
+	// Then truncated to 240 bits and encoded into 12 base32 groups like so:
+	//   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
+	if err != nil {
+		return ""
+	}
+	hasher := crypto.SHA256.New()
+	hasher.Write(derBytes)
+	return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
+func stringFromMap(m map[string]interface{}, key string) (string, error) {
+	val, ok := m[key]
+	if !ok {
+		return "", fmt.Errorf("%q value not specified", key)
+	}
+
+	str, ok := val.(string)
+	if !ok {
+		return "", fmt.Errorf("%q value must be a string", key)
+	}
+	delete(m, key)
+
+	return str, nil
+}
+
+func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
+	curveByteLen := (curve.Params().BitSize + 7) >> 3
+
+	cBytes, err := joseBase64UrlDecode(cB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	cByteLength := len(cBytes)
+	if cByteLength != curveByteLen {
+		return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
+	}
+	return new(big.Int).SetBytes(cBytes), nil
+}
+
+func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
+	dBytes, err := joseBase64UrlDecode(dB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := curve.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	dByteLength := len(dBytes)
+
+	if dByteLength != octetLength {
+		return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
+	}
+
+	return new(big.Int).SetBytes(dBytes), nil
+}
+
+func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
+	nBytes, err := joseBase64UrlDecode(nB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(nBytes), nil
+}
+
+func serializeRSAPublicExponentParam(e int) []byte {
+	// We MUST use the minimum number of octets to represent E.
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Scan only the 4 bytes of buf for the first non-zero octet.
+	for i = 0; i < 4; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Return an error for a non-encodable type
+				return nil, fmt.Errorf("unable to encode header %q: unsupported string slice value", k)
+			}
+		default:
+			// Return an error for a non-encodable type
+			return nil, fmt.Errorf("unable to encode header %q: unsupported type", k)
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go
new file mode 100644
index 00000000..83b7cfb1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go
@@ -0,0 +1,45 @@
+package libtrust
+
+import (
+	"encoding/pem"
+	"reflect"
+	"testing"
+)
+
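+// TestKeyIDEncode is a small sanity check of keyIDEncode's grouping
+// behaviour (an illustrative addition, not an upstream libtrust test):
+// thirty zero bytes base32-encode to 48 'A' characters, which keyIDEncode
+// splits into twelve colon-separated 4-character groups.
+func TestKeyIDEncode(t *testing.T) {
+	got := keyIDEncode(make([]byte, 30))
+	want := "AAAA:AAAA:AAAA:AAAA:AAAA:AAAA:AAAA:AAAA:AAAA:AAAA:AAAA:AAAA"
+	if got != want {
+		t.Fatalf("keyIDEncode: expected %q, received %q", want, got)
+	}
+}
+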
+func TestAddPEMHeadersToKey(t *testing.T) {
+	pk := &rsaPublicKey{nil, map[string]interface{}{}}
+	blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}}
+	addPEMHeadersToKey(blk, pk)
+
+	val := pk.GetExtendedField("hosts")
+	hosts, ok := val.([]string)
+	if !ok {
+		t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val))
+	}
+	expected := []string{"localhost", "127.0.0.1"}
+	if !reflect.DeepEqual(hosts, expected) {
+		t.Errorf("hosts(%v), expected %v", hosts, expected)
+	}
+}
+
+func TestBase64URL(t *testing.T) {
+	clean := "eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2ZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiY3R5IjoiandrK2pzb24ifQ"
+
+	tests := []string{
+		clean, // clean roundtrip
+		"eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2\nZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2\nIiwiY3R5IjoiandrK2pzb24ifQ", // with newlines
+		"eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2 \n ZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2 \n IiwiY3R5IjoiandrK2pzb24ifQ", // with newlines and spaces
+	}
+
+	for i, test := range tests {
+		b, err := joseBase64UrlDecode(test)
+		if err != nil {
+			t.Fatalf("on test %d: %s", i, err)
+		}
+		got := joseBase64UrlEncode(b)
+
+		if got != clean {
+			t.Errorf("expected %q, got %q", clean, got)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/machine/log/log.go b/Godeps/_workspace/src/github.com/docker/machine/log/log.go
new file mode 100644
index 00000000..e7e900c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/machine/log/log.go
@@ -0,0 +1,123 @@
+package log
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+)
+
+// Why the interface? We may only want to print to STDOUT and STDERR for now,
+// but it won't necessarily be that way forever. This interface is intended
+// to provide a "framework" for a variety of different logging types in the
+// future (log to file, log to logstash, etc.) There could be a driver model
+// similar to what is done with OS or machine providers.
+type Logger interface {
+	Debug(...interface{})
+	Debugf(string, ...interface{})
+
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Errorln(...interface{})
+
+	Info(...interface{})
+	Infof(string, ...interface{})
+	Infoln(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+
+	Print(...interface{})
+	Printf(string, ...interface{})
+
+	Warn(...interface{})
+	Warnf(string, ...interface{})
+
+	WithFields(Fields) Logger
+}
+
+var (
+	l = TerminalLogger{}
+)
+
+// TODO: I think this is superfluous and can be replaced by one check for if
+// debug is on that sets a variable in this module.
+func isDebug() bool {
+	debugEnv := os.Getenv("DEBUG")
+	if debugEnv != "" {
+		showDebug, err := strconv.ParseBool(debugEnv)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error parsing boolean value from DEBUG: %s\n", err)
+			os.Exit(1)
+		}
+		return showDebug
+	}
+	return false
+}
+
+type Fields map[string]interface{}
+
+func Debug(args ...interface{}) {
+	l.Debug(args...)
+}
+
+func Debugf(fmtString string, args ...interface{}) {
+	l.Debugf(fmtString, args...)
+}
+
+func Error(args ...interface{}) {
+	l.Error(args...)
+}
+
+func Errorf(fmtString string, args ...interface{}) {
+	l.Errorf(fmtString, args...)
+}
+
+func Errorln(args ...interface{}) {
+	l.Errorln(args...)
+}
+
+func Info(args ...interface{}) {
+	l.Info(args...)
+}
+
+func Infof(fmtString string, args ...interface{}) {
+	l.Infof(fmtString, args...)
+} + +func Infoln(args ...interface{}) { + l.Infoln(args...) +} + +func Fatal(args ...interface{}) { + l.Fatal(args...) +} + +func Fatalf(fmtString string, args ...interface{}) { + l.Fatalf(fmtString, args...) +} + +func Print(args ...interface{}) { + l.Print(args...) +} + +func Printf(fmtString string, args ...interface{}) { + l.Printf(fmtString, args...) +} + +func Warn(args ...interface{}) { + l.Warn(args...) +} + +func Warnf(fmtString string, args ...interface{}) { + l.Warnf(fmtString, args...) +} + +func WithField(fieldName string, field interface{}) Logger { + return l.WithFields(Fields{ + fieldName: field, + }) +} + +func WithFields(fields Fields) Logger { + return l.WithFields(fields) +} diff --git a/Godeps/_workspace/src/github.com/docker/machine/log/log_test.go b/Godeps/_workspace/src/github.com/docker/machine/log/log_test.go new file mode 100644 index 00000000..349ffe6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/machine/log/log_test.go @@ -0,0 +1,19 @@ +package log + +import "testing" + +func TestTerminalLoggerWithFields(t *testing.T) { + logger := TerminalLogger{} + withFieldsLogger := logger.WithFields(Fields{ + "foo": "bar", + "spam": "eggs", + }) + withFieldsTerminalLogger, ok := withFieldsLogger.(TerminalLogger) + if !ok { + t.Fatal("Type assertion to TerminalLogger failed") + } + expectedOutFields := "\t\t foo=bar spam=eggs" + if withFieldsTerminalLogger.fieldOut != expectedOutFields { + t.Fatalf("Expected %q, got %q", expectedOutFields, withFieldsTerminalLogger.fieldOut) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/machine/log/terminal.go b/Godeps/_workspace/src/github.com/docker/machine/log/terminal.go new file mode 100644 index 00000000..58b6c57e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/machine/log/terminal.go @@ -0,0 +1,129 @@ +package log + +import ( + "fmt" + "os" + "sort" +) + +type TerminalLogger struct { + // fieldOut is used to do log.WithFields correctly + fieldOut string +} + +func (t TerminalLogger) log(args ...interface{}) { + fmt.Print(args...) + fmt.Print(t.fieldOut, "\n") + t.fieldOut = "" +} + +func (t TerminalLogger) logf(fmtString string, args ...interface{}) { + fmt.Printf(fmtString, args...) + fmt.Print(t.fieldOut, "\n") + t.fieldOut = "" +} + +func (t TerminalLogger) err(args ...interface{}) { + fmt.Fprint(os.Stderr, args...) + fmt.Fprint(os.Stderr, t.fieldOut, "\n") + t.fieldOut = "" +} + +func (t TerminalLogger) errf(fmtString string, args ...interface{}) { + fmt.Fprintf(os.Stderr, fmtString, args...) + fmt.Fprint(os.Stderr, t.fieldOut, "\n") + t.fieldOut = "" +} + +func (t TerminalLogger) Debug(args ...interface{}) { + if isDebug() { + t.log(args...) + } +} + +func (t TerminalLogger) Debugf(fmtString string, args ...interface{}) { + if isDebug() { + t.logf(fmtString, args...) + } +} + +func (t TerminalLogger) Error(args ...interface{}) { + t.err(args...) +} + +func (t TerminalLogger) Errorf(fmtString string, args ...interface{}) { + t.errf(fmtString, args...) +} + +func (t TerminalLogger) Errorln(args ...interface{}) { + t.err(args...) +} + +func (t TerminalLogger) Info(args ...interface{}) { + t.log(args...) +} + +func (t TerminalLogger) Infof(fmtString string, args ...interface{}) { + t.logf(fmtString, args...) +} + +func (t TerminalLogger) Infoln(args ...interface{}) { + t.log(args...) +} + +func (t TerminalLogger) Fatal(args ...interface{}) { + t.err(args...) + os.Exit(1) +} + +func (t TerminalLogger) Fatalf(fmtString string, args ...interface{}) { + t.errf(fmtString, args...) 
+ os.Exit(1) +} + +func (t TerminalLogger) Print(args ...interface{}) { + t.log(args...) +} + +func (t TerminalLogger) Printf(fmtString string, args ...interface{}) { + t.logf(fmtString, args...) +} + +func (t TerminalLogger) Warn(args ...interface{}) { + t.log(args...) +} + +func (t TerminalLogger) Warnf(fmtString string, args ...interface{}) { + t.logf(fmtString, args...) +} + +func (t TerminalLogger) WithFields(fields Fields) Logger { + // When the user calls WithFields, we make a string which gets appended + // to the output of the final [Info|Warn|Error] call for the + // descriptive fields. Because WithFields returns the proper Logger + // (with the fieldOut string set correctly), the logrus syntax will + // still work. + kvpairs := []string{} + + // Why the string slice song and dance? Because Go's map iteration + // order is random, we will get inconsistent results if we don't sort + // the fields (or their resulting string K/V pairs, like we have here). + // Otherwise, we couldn't test this reliably. + for k, v := range fields { + kvpairs = append(kvpairs, fmt.Sprintf("%s=%v", k, v)) + } + + sort.Strings(kvpairs) + + // TODO: + // 1. Is this thread-safe? + // 2. Add more tabs? + t.fieldOut = "\t\t" + + for _, s := range kvpairs { + // TODO: Is %v the correct format string here? + t.fieldOut = fmt.Sprintf("%s %s", t.fieldOut, s) + } + + return t +} diff --git a/Godeps/_workspace/src/github.com/docker/machine/utils/b2d.go b/Godeps/_workspace/src/github.com/docker/machine/utils/b2d.go index b18235b8..df130e14 100644 --- a/Godeps/_workspace/src/github.com/docker/machine/utils/b2d.go +++ b/Godeps/_workspace/src/github.com/docker/machine/utils/b2d.go @@ -1,15 +1,18 @@ package utils import ( - "encoding/json" + //"encoding/json" "fmt" "io" "io/ioutil" "net" "net/http" + "net/url" "os" "path/filepath" "time" + + "github.com/docker/machine/log" ) const ( @@ -22,7 +25,9 @@ func defaultTimeout(network, addr string) (net.Conn, error) { func getClient() *http.Client { transport := http.Transport{ - Dial: defaultTimeout, + DisableKeepAlives: true, + Proxy: http.ProxyFromEnvironment, + Dial: defaultTimeout, } client := http.Client{ @@ -33,13 +38,17 @@ func getClient() *http.Client { } type B2dUtils struct { + isoFilename string + commonIsoPath string + imgCachePath string githubApiBaseUrl string githubBaseUrl string } -func NewB2dUtils(githubApiBaseUrl, githubBaseUrl string) *B2dUtils { +func NewB2dUtils(githubApiBaseUrl, githubBaseUrl, isoFilename string) *B2dUtils { defaultBaseApiUrl := "https://api.github.com" defaultBaseUrl := "https://github.com" + imgCachePath := GetMachineCacheDir() if githubApiBaseUrl == "" { githubApiBaseUrl = defaultBaseApiUrl @@ -50,6 +59,9 @@ func NewB2dUtils(githubApiBaseUrl, githubBaseUrl string) *B2dUtils { } return &B2dUtils{ + isoFilename: isoFilename, + imgCachePath: GetMachineCacheDir(), + commonIsoPath: filepath.Join(imgCachePath, isoFilename), githubApiBaseUrl: githubApiBaseUrl, githubBaseUrl: githubBaseUrl, } @@ -58,53 +70,169 @@ func NewB2dUtils(githubApiBaseUrl, githubBaseUrl string) *B2dUtils { // Get the latest boot2docker release tag name (e.g. "v0.6.0"). 
 // FIXME: find or create some other way to get the "latest release" of boot2docker since the GitHub API has a pretty low rate limit on API requests
 func (b *B2dUtils) GetLatestBoot2DockerReleaseURL() (string, error) {
-	client := getClient()
-	apiUrl := fmt.Sprintf("%s/repos/boot2docker/boot2docker/releases", b.githubApiBaseUrl)
-	rsp, err := client.Get(apiUrl)
-	if err != nil {
-		return "", err
-	}
-	defer rsp.Body.Close()
+	//client := getClient()
+	//apiUrl := fmt.Sprintf("%s/repos/boot2docker/boot2docker/releases", b.githubApiBaseUrl)
+	//rsp, err := client.Get(apiUrl)
+	//if err != nil {
+	//	return "", err
+	//}
+	//defer rsp.Body.Close()
 
-	var t []struct {
-		TagName string `json:"tag_name"`
-	}
-	if err := json.NewDecoder(rsp.Body).Decode(&t); err != nil {
-		return "", err
-	}
-	if len(t) == 0 {
-		return "", fmt.Errorf("no releases found")
-	}
+	//var t []struct {
+	//	TagName    string `json:"tag_name"`
+	//	PreRelease bool   `json:"prerelease"`
+	//}
+	//if err := json.NewDecoder(rsp.Body).Decode(&t); err != nil {
+	//	return "", fmt.Errorf("Error unmarshaling the Github API response: %s\nYou may be getting rate limited by Github.", err)
+	//}
+	//if len(t) == 0 {
+	//	return "", fmt.Errorf("no releases found")
+	//}
 
-	tag := t[0].TagName
-	isoUrl := fmt.Sprintf("%s/boot2docker/boot2docker/releases/download/%s/boot2docker.iso", b.githubBaseUrl, tag)
-	return isoUrl, nil
+	//// find the latest "released" release (i.e. not pre-release)
+	//isoUrl := ""
+	//for _, r := range t {
+	//	if !r.PreRelease {
+	//		tag := r.TagName
+	//		isoUrl = fmt.Sprintf("%s/boot2docker/boot2docker/releases/download/%s/boot2docker.iso", b.githubBaseUrl, tag)
+	//		break
+	//	}
+	//}
+	//return isoUrl, nil
+
+	// TODO: once we decide on the final versioning and location we will
+	// enable the above "check for latest"
+	u := fmt.Sprintf("https://s3.amazonaws.com/docker-mcn/public/b2d-next/%s", b.isoFilename)
+	return u, nil
+
+}
+
+func removeFileIfExists(name string) error {
+	if _, err := os.Stat(name); err == nil {
+		if err := os.Remove(name); err != nil {
+			log.Fatalf("Error removing temporary download file: %s", err)
+		}
+	}
+	return nil
+}
 
 // Download boot2docker ISO image for the given tag and save it at dest.
-func (b *B2dUtils) DownloadISO(dir, file, url string) error {
-	client := getClient()
-	rsp, err := client.Get(url)
-	if err != nil {
-		return err
+func (b *B2dUtils) DownloadISO(dir, file, isoUrl string) error {
+	u, err := url.Parse(isoUrl)
+	if err != nil {
+		return err
+	}
+
+	var src io.ReadCloser
+	if u.Scheme == "file" || u.Scheme == "" {
+		s, err := os.Open(u.Path)
+		if err != nil {
+			return err
+		}
+		src = s
+	} else {
+		client := getClient()
+		s, err := client.Get(isoUrl)
+		if err != nil {
+			return err
+		}
+		src = s.Body
 	}
-	defer rsp.Body.Close()
+
+	defer src.Close()
 
 	// Download to a temp file first then rename it to avoid partial download.
 	f, err := ioutil.TempFile(dir, file+".tmp")
 	if err != nil {
 		return err
 	}
-	defer os.Remove(f.Name())
-	if _, err := io.Copy(f, rsp.Body); err != nil {
+
+	defer func() {
+		if err := removeFileIfExists(f.Name()); err != nil {
+			log.Fatalf("Error removing file: %s", err)
+		}
+	}()
+
+	if _, err := io.Copy(f, src); err != nil {
 		// TODO: display download progress?
 		return err
 	}
+
 	if err := f.Close(); err != nil {
 		return err
 	}
-	if err := os.Rename(f.Name(), filepath.Join(dir, file)); err != nil {
+
+	// Dest is the final path of the boot2docker.iso file.
+	dest := filepath.Join(dir, file)
+
+	// Windows can't rename in place, so remove the old file before
+	// renaming the temporary downloaded file.
+	if err := removeFileIfExists(dest); err != nil {
 		return err
 	}
+
+	if err := os.Rename(f.Name(), dest); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *B2dUtils) DownloadLatestBoot2Docker() error {
+	latestReleaseUrl, err := b.GetLatestBoot2DockerReleaseURL()
+	if err != nil {
+		return err
+	}
+
+	return b.DownloadISOFromURL(latestReleaseUrl)
+}
+
+func (b *B2dUtils) DownloadISOFromURL(latestReleaseUrl string) error {
+	log.Infof("Downloading %s to %s...", latestReleaseUrl, b.commonIsoPath)
+	if err := b.DownloadISO(b.imgCachePath, b.isoFilename, latestReleaseUrl); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *B2dUtils) CopyIsoToMachineDir(isoURL, machineName string) error {
+	machinesDir := GetMachineDir()
+	machineIsoPath := filepath.Join(machinesDir, machineName, b.isoFilename)
+
+	// just in case the cache dir has been manually deleted,
+	// check for it and recreate it if it's gone
+	if _, err := os.Stat(b.imgCachePath); os.IsNotExist(err) {
+		log.Infof("Image cache does not exist, creating it at %s...", b.imgCachePath)
+		if err := os.Mkdir(b.imgCachePath, 0700); err != nil {
+			return err
+		}
+	}
+
+	// By default just copy the existing "cached" iso to
+	// the machine's directory...
+	if isoURL == "" {
+		if err := b.copyDefaultIsoToMachine(machineIsoPath); err != nil {
+			return err
+		}
+	} else {
+		// But if ISO is specified go get it directly
+		log.Infof("Downloading %s from %s...", b.isoFilename, isoURL)
+		if err := b.DownloadISO(filepath.Join(machinesDir, machineName), b.isoFilename, isoURL); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *B2dUtils) copyDefaultIsoToMachine(machineIsoPath string) error {
+	if _, err := os.Stat(b.commonIsoPath); os.IsNotExist(err) {
+		log.Info("No default boot2docker iso found locally, downloading the latest release...")
+		if err := b.DownloadLatestBoot2Docker(); err != nil {
+			return err
+		}
+	}
+
+	if err := CopyFile(b.commonIsoPath, machineIsoPath); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/machine/utils/b2d_test.go b/Godeps/_workspace/src/github.com/docker/machine/utils/b2d_test.go
index 2fe5a381..e780a16e 100644
--- a/Godeps/_workspace/src/github.com/docker/machine/utils/b2d_test.go
+++ b/Godeps/_workspace/src/github.com/docker/machine/utils/b2d_test.go
@@ -1,7 +1,6 @@
 package utils
 
 import (
-	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
@@ -11,20 +10,26 @@ import (
 
 func TestGetLatestBoot2DockerReleaseUrl(t *testing.T) {
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		respText := `[{"tag_name": "0.1"}]`
+		respText := `[{"tag_name": "0.2", "prerelease": true}, {"tag_name": "0.1", "prerelease": false}]`
 		w.Write([]byte(respText))
 	}))
 	defer ts.Close()
 
-	b := NewB2dUtils(ts.URL, ts.URL)
+	b := NewB2dUtils(ts.URL, ts.URL, "virtualbox")
 	isoUrl, err := b.GetLatestBoot2DockerReleaseURL()
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	expectedUrl := fmt.Sprintf("%s/boot2docker/boot2docker/releases/download/0.1/boot2docker.iso", ts.URL)
-	if isoUrl != expectedUrl {
-		t.Fatalf("expected url %s; received %s", isoUrl)
-	}
+	// TODO: update to release URL once we get the releases worked
+	// out for b2d-ng
+	//expectedUrl := fmt.Sprintf("%s/boot2docker/boot2docker/releases/download/0.1/boot2docker.iso", ts.URL)
+	//if isoUrl != expectedUrl {
+	//	t.Fatalf("expected url %s; received %s", expectedUrl,
isoUrl) + //} + + if isoUrl == "" { + t.Fatalf("expected a url for the iso") } } @@ -42,7 +47,7 @@ func TestDownloadIso(t *testing.T) { t.Fatal(err) } - b := NewB2dUtils(ts.URL, ts.URL) + b := NewB2dUtils(ts.URL, ts.URL, "") if err := b.DownloadISO(tmpDir, filename, ts.URL); err != nil { t.Fatal(err) } diff --git a/Godeps/_workspace/src/github.com/docker/machine/utils/certs.go b/Godeps/_workspace/src/github.com/docker/machine/utils/certs.go index 70b8a6cd..f2160646 100644 --- a/Godeps/_workspace/src/github.com/docker/machine/utils/certs.go +++ b/Godeps/_workspace/src/github.com/docker/machine/utils/certs.go @@ -7,12 +7,33 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "io/ioutil" "math/big" "net" "os" "time" ) +func getTLSConfig(caCert, cert, key []byte, allowInsecure bool) (*tls.Config, error) { + // TLS config + var tlsConfig tls.Config + tlsConfig.InsecureSkipVerify = allowInsecure + certPool := x509.NewCertPool() + + certPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = certPool + keypair, err := tls.X509KeyPair(cert, key) + if err != nil { + return &tlsConfig, err + } + tlsConfig.Certificates = []tls.Certificate{keypair} + if allowInsecure { + tlsConfig.InsecureSkipVerify = true + } + + return &tlsConfig, nil +} + func newCertificate(org string) (*x509.Certificate, error) { now := time.Now() // need to set notBefore slightly in the past to account for time @@ -34,7 +55,7 @@ func newCertificate(org string) (*x509.Certificate, error) { NotBefore: notBefore, NotAfter: notAfter, - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement, BasicConstraintsValid: true, }, nil @@ -51,6 +72,8 @@ func GenerateCACertificate(certFile, keyFile, org string, bits int) error { template.IsCA = true template.KeyUsage |= x509.KeyUsageCertSign + template.KeyUsage |= x509.KeyUsageKeyEncipherment + template.KeyUsage |= x509.KeyUsageKeyAgreement priv, err := rsa.GenerateKey(rand.Reader, bits) if err != nil { @@ -93,13 +116,13 @@ func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org stri } // client if len(hosts) == 1 && hosts[0] == "" { - template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} template.KeyUsage = x509.KeyUsageDigitalSignature } else { // server + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} for _, h := range hosts { if ip := net.ParseIP(h); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) - } else { template.DNSNames = append(template.DNSNames, h) } @@ -109,13 +132,11 @@ func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org stri tlsCert, err := tls.LoadX509KeyPair(caFile, caKeyFile) if err != nil { return err - } priv, err := rsa.GenerateKey(rand.Reader, bits) if err != nil { return err - } x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0]) @@ -131,7 +152,6 @@ func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org stri certOut, err := os.Create(certFile) if err != nil { return err - } pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) @@ -140,7 +160,6 @@ func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org stri keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err - } pem.Encode(keyOut, &pem.Block{Type: "RSA 
PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) @@ -148,3 +167,36 @@ func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile, org stri return nil } + +func ValidateCertificate(addr, caCertPath, serverCertPath, serverKeyPath string) (bool, error) { + caCert, err := ioutil.ReadFile(caCertPath) + if err != nil { + return false, err + } + + serverCert, err := ioutil.ReadFile(serverCertPath) + if err != nil { + return false, err + } + + serverKey, err := ioutil.ReadFile(serverKeyPath) + if err != nil { + return false, err + } + + tlsConfig, err := getTLSConfig(caCert, serverCert, serverKey, false) + if err != nil { + return false, err + } + + dialer := &net.Dialer{ + Timeout: time.Second * 2, + } + + _, err = tls.DialWithDialer(dialer, "tcp", addr, tlsConfig) + if err != nil { + return false, nil + } + + return true, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/machine/utils/certs_test.go b/Godeps/_workspace/src/github.com/docker/machine/utils/certs_test.go index 042f47e0..1201e63d 100644 --- a/Godeps/_workspace/src/github.com/docker/machine/utils/certs_test.go +++ b/Godeps/_workspace/src/github.com/docker/machine/utils/certs_test.go @@ -12,6 +12,8 @@ func TestGenerateCACertificate(t *testing.T) { if err != nil { t.Fatal(err) } + // cleanup + defer os.RemoveAll(tmpDir) os.Setenv("MACHINE_DIR", tmpDir) caCertPath := filepath.Join(tmpDir, "ca.pem") @@ -29,9 +31,6 @@ func TestGenerateCACertificate(t *testing.T) { t.Fatal(err) } os.Setenv("MACHINE_DIR", "") - - // cleanup - _ = os.RemoveAll(tmpDir) } func TestGenerateCert(t *testing.T) { @@ -39,6 +38,8 @@ func TestGenerateCert(t *testing.T) { if err != nil { t.Fatal(err) } + // cleanup + defer os.RemoveAll(tmpDir) os.Setenv("MACHINE_DIR", tmpDir) caCertPath := filepath.Join(tmpDir, "ca.pem") @@ -70,7 +71,4 @@ func TestGenerateCert(t *testing.T) { if _, err := os.Stat(keyPath); err != nil { t.Fatalf("key not created at %s", keyPath) } - - // cleanup - _ = os.RemoveAll(tmpDir) } diff --git a/Godeps/_workspace/src/github.com/docker/machine/utils/utils.go b/Godeps/_workspace/src/github.com/docker/machine/utils/utils.go index fb4bd171..e2ec725d 100644 --- a/Godeps/_workspace/src/github.com/docker/machine/utils/utils.go +++ b/Godeps/_workspace/src/github.com/docker/machine/utils/utils.go @@ -1,10 +1,19 @@ package utils import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" "io" + "net" "os" "path/filepath" "runtime" + "strconv" + "time" + + "github.com/docker/machine/log" ) func GetHomeDir() string { @@ -15,23 +24,27 @@ func GetHomeDir() string { } func GetBaseDir() string { - baseDir := os.Getenv("MACHINE_DIR") + baseDir := os.Getenv("MACHINE_STORAGE_PATH") if baseDir == "" { - baseDir = GetHomeDir() + baseDir = filepath.Join(GetHomeDir(), ".docker", "machine") } return baseDir } func GetDockerDir() string { - return filepath.Join(GetBaseDir(), ".docker") + return filepath.Join(GetHomeDir(), ".docker") } func GetMachineDir() string { - return filepath.Join(GetDockerDir(), "machines") + return filepath.Join(GetBaseDir(), "machines") } -func GetMachineClientCertDir() string { - return filepath.Join(GetMachineDir(), ".client") +func GetMachineCertDir() string { + return filepath.Join(GetBaseDir(), "certs") +} + +func GetMachineCacheDir() string { + return filepath.Join(GetBaseDir(), "cache") } func GetUsername() string { @@ -69,5 +82,89 @@ func CopyFile(src, dst string) error { return err } + fi, err := os.Stat(src) + if err != nil { + return err + } + + if err := os.Chmod(dst, fi.Mode()); err != nil { + 
return err
+	}
+
+	return nil
+}
+
+func WaitForSpecificOrError(f func() (bool, error), maxAttempts int, waitInterval time.Duration) error {
+	for i := 0; i < maxAttempts; i++ {
+		stop, err := f()
+		if err != nil {
+			return err
+		}
+		if stop {
+			return nil
+		}
+		time.Sleep(waitInterval)
+	}
+	return fmt.Errorf("Maximum number of retries (%d) exceeded", maxAttempts)
+}
+
+func WaitForSpecific(f func() bool, maxAttempts int, waitInterval time.Duration) error {
+	return WaitForSpecificOrError(func() (bool, error) {
+		return f(), nil
+	}, maxAttempts, waitInterval)
+}
+
+func WaitFor(f func() bool) error {
+	return WaitForSpecific(f, 60, 3*time.Second)
+}
+
+func WaitForDocker(ip string, daemonPort int) error {
+	return WaitFor(func() bool {
+		conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", ip, daemonPort))
+		if err != nil {
+			log.Debugf("Daemon not responding yet: %s", err)
+			return false
+		}
+		conn.Close()
+		return true
+	})
+}
+
+func DumpVal(vals ...interface{}) {
+	for _, val := range vals {
+		prettyJSON, err := json.MarshalIndent(val, "", " ")
+		if err != nil {
+			log.Fatal(err)
+		}
+		log.Debug(string(prettyJSON))
+	}
+}
+
+// The following two functions are from the github.com/docker/docker/utils
+// module. It was way overkill to include the whole module, so we just have
+// these bits that we're using here.
+func TruncateID(id string) string {
+	shortLen := 12
+	if len(id) < shortLen {
+		shortLen = len(id)
+	}
+	return id[:shortLen]
+}
+
+// GenerateRandomID returns a unique id
+func GenerateRandomID() string {
+	for {
+		id := make([]byte, 32)
+		if _, err := io.ReadFull(rand.Reader, id); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		value := hex.EncodeToString(id)
+		// if we try to parse the truncated form as an int and we don't have
+		// an error then the value is all numeric and causes issues when
+		// used as a hostname.
ref #3869 + if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { + continue + } + return value + } +} diff --git a/Godeps/_workspace/src/github.com/docker/machine/utils/utils_test.go b/Godeps/_workspace/src/github.com/docker/machine/utils/utils_test.go index 450c8c07..1b9f4c6c 100644 --- a/Godeps/_workspace/src/github.com/docker/machine/utils/utils_test.go +++ b/Godeps/_workspace/src/github.com/docker/machine/utils/utils_test.go @@ -15,44 +15,34 @@ func TestGetBaseDir(t *testing.T) { homeDir := GetHomeDir() baseDir := GetBaseDir() - if strings.Index(homeDir, baseDir) != 0 { + if strings.Index(baseDir, homeDir) != 0 { t.Fatalf("expected base dir with prefix %s; received %s", homeDir, baseDir) } } func TestGetCustomBaseDir(t *testing.T) { root := "/tmp" - os.Setenv("MACHINE_DIR", root) + os.Setenv("MACHINE_STORAGE_PATH", root) baseDir := GetBaseDir() - if strings.Index(root, baseDir) != 0 { + if strings.Index(baseDir, root) != 0 { t.Fatalf("expected base dir with prefix %s; received %s", root, baseDir) } - os.Setenv("MACHINE_DIR", "") + os.Setenv("MACHINE_STORAGE_PATH", "") } func TestGetDockerDir(t *testing.T) { - root := "/tmp" - os.Setenv("MACHINE_DIR", root) - dockerDir := GetDockerDir() + homeDir := GetHomeDir() + baseDir := GetBaseDir() - if strings.Index(dockerDir, root) != 0 { - t.Fatalf("expected docker dir with prefix %s; received %s", root, dockerDir) + if strings.Index(baseDir, homeDir) != 0 { + t.Fatalf("expected base dir with prefix %s; received %s", homeDir, baseDir) } - - path, filename := path.Split(dockerDir) - if strings.Index(path, root) != 0 { - t.Fatalf("expected base path of %s; received %s", root, path) - } - if filename != ".docker" { - t.Fatalf("expected docker dir \".docker\"; received %s", filename) - } - os.Setenv("MACHINE_DIR", "") } func TestGetMachineDir(t *testing.T) { root := "/tmp" - os.Setenv("MACHINE_DIR", root) + os.Setenv("MACHINE_STORAGE_PATH", root) machineDir := GetMachineDir() if strings.Index(machineDir, root) != 0 { @@ -66,13 +56,13 @@ func TestGetMachineDir(t *testing.T) { if filename != "machines" { t.Fatalf("expected machine dir \"machines\"; received %s", filename) } - os.Setenv("MACHINE_DIR", "") + os.Setenv("MACHINE_STORAGE_PATH", "") } -func TestGetMachineClientCertDir(t *testing.T) { +func TestGetMachineCertDir(t *testing.T) { root := "/tmp" - os.Setenv("MACHINE_DIR", root) - clientDir := GetMachineClientCertDir() + os.Setenv("MACHINE_STORAGE_PATH", root) + clientDir := GetMachineCertDir() if strings.Index(clientDir, root) != 0 { t.Fatalf("expected machine client cert dir with prefix %s; received %s", root, clientDir) @@ -82,10 +72,10 @@ func TestGetMachineClientCertDir(t *testing.T) { if strings.Index(path, root) != 0 { t.Fatalf("expected base path of %s; received %s", root, path) } - if filename != ".client" { - t.Fatalf("expected machine client dir \".client\"; received %s", filename) + if filename != "certs" { + t.Fatalf("expected machine client dir \"certs\"; received %s", filename) } - os.Setenv("MACHINE_DIR", "") + os.Setenv("MACHINE_STORAGE_PATH", "") } func TestCopyFile(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile new file mode 100644 index 00000000..0948dcfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile @@ -0,0 +1,15 @@ +# this file describes how to build tsuru python image +# to run it: +# 1- install docker +# 2- 
run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile + +from base:ubuntu-quantal +run apt-get install wget -y --force-yes +run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate +run mkdir /var/lib/tsuru +run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1 +run cp /var/lib/tsuru/python/deploy /var/lib/tsuru +run cp /var/lib/tsuru/base/restart /var/lib/tsuru +run cp /var/lib/tsuru/base/start /var/lib/tsuru +run /var/lib/tsuru/base/install +run /var/lib/tsuru/base/setup diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml new file mode 100644 index 00000000..6796581f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE new file mode 100644 index 00000000..0e5fb872 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/gorilla/context/README.md new file mode 100644 index 00000000..c60a31b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. 
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/gorilla/context/context.go
new file mode 100644
index 00000000..81cb128b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+	"net/http"
+	"sync"
+	"time"
+)
+
+var (
+	mutex sync.RWMutex
+	data  = make(map[*http.Request]map[interface{}]interface{})
+	datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+	mutex.Lock()
+	if data[r] == nil {
+		data[r] = make(map[interface{}]interface{})
+		datat[r] = time.Now().Unix()
+	}
+	data[r][key] = val
+	mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+	mutex.RLock()
+	if ctx := data[r]; ctx != nil {
+		value := ctx[key]
+		mutex.RUnlock()
+		return value
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetOk returns the stored value and a presence flag, like the two-value
+// form of a map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+	mutex.RLock()
+	if _, ok := data[r]; ok {
+		value, ok := data[r][key]
+		mutex.RUnlock()
+		return value, ok
+	}
+	mutex.RUnlock()
+	return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+	mutex.RLock()
+	if context, ok := data[r]; ok {
+		result := make(map[interface{}]interface{}, len(context))
+		for k, v := range context {
+			result[k] = v
+		}
+		mutex.RUnlock()
+		return result
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+	mutex.RLock()
+	context, ok := data[r]
+	result := make(map[interface{}]interface{}, len(context))
+	for k, v := range context {
+		result[k] = v
+	}
+	mutex.RUnlock()
+	return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+	mutex.Lock()
+	if data[r] != nil {
+		delete(data[r], key)
+	}
+	mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+	mutex.Lock()
+	clear(r)
+	mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+	delete(data, r)
+	delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used as a sanity check: if context cleaning was not set up
+// properly, some request data can be kept forever, consuming an increasing
+// amount of memory. In case this is detected, Purge() must be called
+// periodically until the problem is fixed.
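+//
+// A minimal sketch of such periodic purging (illustrative only; the ticker
+// interval and the maxAge of 300 seconds are arbitrary choices):
+//
+//	go func() {
+//		for range time.Tick(time.Minute) {
+//			context.Purge(300)
+//		}
+//	}()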
+func Purge(maxAge int) int {
+	mutex.Lock()
+	count := 0
+	if maxAge <= 0 {
+		count = len(data)
+		data = make(map[*http.Request]map[interface{}]interface{})
+		datat = make(map[*http.Request]int64)
+	} else {
+		min := time.Now().Unix() - int64(maxAge)
+		for r := range data {
+			if datat[r] < min {
+				clear(r)
+				count++
+			}
+		}
+	}
+	mutex.Unlock()
+	return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer Clear(r)
+		h.ServeHTTP(w, r)
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go
new file mode 100644
index 00000000..9814c501
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go
@@ -0,0 +1,161 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+	"net/http"
+	"testing"
+)
+
+type keyType int
+
+const (
+	key1 keyType = iota
+	key2
+)
+
+func TestContext(t *testing.T) {
+	assertEqual := func(val interface{}, exp interface{}) {
+		if val != exp {
+			t.Errorf("Expected %v, got %v.", exp, val)
+		}
+	}
+
+	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+
+	// Get()
+	assertEqual(Get(r, key1), nil)
+
+	// Set()
+	Set(r, key1, "1")
+	assertEqual(Get(r, key1), "1")
+	assertEqual(len(data[r]), 1)
+
+	Set(r, key2, "2")
+	assertEqual(Get(r, key2), "2")
+	assertEqual(len(data[r]), 2)
+
+	// GetOk()
+	value, ok := GetOk(r, key1)
+	assertEqual(value, "1")
+	assertEqual(ok, true)
+
+	value, ok = GetOk(r, "not exists")
+	assertEqual(value, nil)
+	assertEqual(ok, false)
+
+	Set(r, "nil value", nil)
+	value, ok = GetOk(r, "nil value")
+	assertEqual(value, nil)
+	assertEqual(ok, true)
+
+	// GetAll()
+	values := GetAll(r)
+	assertEqual(len(values), 3)
+
+	// GetAll() for empty request
+	values = GetAll(emptyR)
+	if values != nil {
+		t.Error("GetAll didn't return nil value for invalid request")
+	}
+
+	// GetAllOk()
+	values, ok = GetAllOk(r)
+	assertEqual(len(values), 3)
+	assertEqual(ok, true)
+
+	// GetAllOk() for empty request
+	values, ok = GetAllOk(emptyR)
+	assertEqual(len(values), 0)
+	assertEqual(ok, false)
+
+	// Delete()
+	Delete(r, key1)
+	assertEqual(Get(r, key1), nil)
+	assertEqual(len(data[r]), 2)
+
+	Delete(r, key2)
+	assertEqual(Get(r, key2), nil)
+	assertEqual(len(data[r]), 1)
+
+	// Clear()
+	Clear(r)
+	assertEqual(len(data), 0)
+}
+
+func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Get(r, key)
+	}
+	done <- struct{}{}
+
+}
+
+func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) {
+	<-wait
+	for i := 0; i < iterations; i++ {
+		Set(r, key, value)
+	}
+	done <- struct{}{}
+
+}
+
+func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) {
+
+	b.StopTimer()
+	r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+	done := make(chan struct{})
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		wait := make(chan struct{})
+
+		for i := 0; i < numReaders; i++ {
+			go parallelReader(r, "test", iterations, wait, done)
+		}
+
+		for i := 0; i < numWriters; i++ {
+			go parallelWriter(r, "test", "123",
iterations, wait, done)
+		}
+
+		close(wait)
+
+		for i := 0; i < numReaders+numWriters; i++ {
+			<-done
+		}
+
+	}
+
+}
+
+func BenchmarkMutexSameReadWrite1(b *testing.B) {
+	benchmarkMutex(b, 1, 1, 32)
+}
+func BenchmarkMutexSameReadWrite2(b *testing.B) {
+	benchmarkMutex(b, 2, 2, 32)
+}
+func BenchmarkMutexSameReadWrite4(b *testing.B) {
+	benchmarkMutex(b, 4, 4, 32)
+}
+func BenchmarkMutex1(b *testing.B) {
+	benchmarkMutex(b, 2, 8, 32)
+}
+func BenchmarkMutex2(b *testing.B) {
+	benchmarkMutex(b, 16, 4, 64)
+}
+func BenchmarkMutex3(b *testing.B) {
+	benchmarkMutex(b, 1, 2, 128)
+}
+func BenchmarkMutex4(b *testing.B) {
+	benchmarkMutex(b, 128, 32, 256)
+}
+func BenchmarkMutex5(b *testing.B) {
+	benchmarkMutex(b, 1024, 2048, 64)
+}
+func BenchmarkMutex6(b *testing.B) {
+	benchmarkMutex(b, 2048, 1024, 512)
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/gorilla/context/doc.go
new file mode 100644
index 00000000..73c74003
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+	package foo
+
+	import (
+		"github.com/gorilla/context"
+	)
+
+	type key int
+
+	const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+	context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// val is "bar".
+		val := context.Get(r, foo.MyKey)
+
+		// returns ("bar", true)
+		val, ok := context.GetOk(r, foo.MyKey)
+		// ...
+	}
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+	type key int
+
+	const mykey key = 0
+
+	// GetMyKey returns a value for this package from the request values.
+	func GetMyKey(r *http.Request) SomeType {
+		if rv := context.Get(r, mykey); rv != nil {
+			return rv.(SomeType)
+		}
+		return nil
+	}
+
+	// SetMyKey sets a value for this package in the request values.
+	func SetMyKey(r *http.Request, val SomeType) {
+		context.Set(r, mykey, val)
+	}
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+	context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
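+
+As an illustration, one way to wire this up when serving (a sketch that
+assumes the default mux; substitute your own handler as needed):
+
+	func main() {
+		http.ListenAndServe(":8080", context.ClearHandler(http.DefaultServeMux))
+	}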
+ +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 00000000..d87d4657 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 00000000..0e5fb872 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/gorilla/mux/README.md new file mode 100644 index 00000000..e60301b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go new file mode 100644 index 00000000..c5f97b2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package mux
+
+import (
+	"net/http"
+	"testing"
+)
+
+func BenchmarkMux(b *testing.B) {
+	router := new(Router)
+	handler := func(w http.ResponseWriter, r *http.Request) {}
+	router.HandleFunc("/v1/{v1}", handler)
+
+	request, _ := http.NewRequest("GET", "/v1/anything", nil)
+	for i := 0; i < b.N; i++ {
+		router.ServeHTTP(nil, request)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
new file mode 100644
index 00000000..442babab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
@@ -0,0 +1,206 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gorilla/mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintain
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the
+variable will match anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+by calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.domain.com".
+	r.Host("www.domain.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added.
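+
+Before looking at those matchers, here is a brief editor's sketch tying the
+variable pieces together (an illustrative addition, not upstream text; it
+assumes the /articles/{category}/{id:[0-9]+} route registered above):
+
+	func ArticleHandler(w http.ResponseWriter, r *http.Request) {
+		vars := mux.Vars(r)
+		// For /articles/technology/42 this writes "technology" and "42".
+		fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
+	}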
+To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+		Host("www.domain.com").
+		Methods("GET").
+		Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.domain.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.domain.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.domain.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subrouters. When a subrouter has a path prefix,
+the inner routes use it as a base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name by calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+		Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+		Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route.
+For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of either
+`application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42")
+*/
+package mux
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
new file mode 100644
index 00000000..e2532309
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
@@ -0,0 +1,465 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+
+	"github.com/gorilla/context"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//	var router = mux.NewRouter()
+//
+//	func main() {
+//		http.Handle("/", router)
+//	}
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//	func init() {
+//		http.Handle("/", router)
+//	}
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// If true, do not clear the request context after handling the request.
+	KeepContext bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved by calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	// Clean path to canonical form and redirect.
+	if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+		// Added 3 lines (Philip Schlump) - it was dropping the query string
+		// and fragment from the URL. This matches the fix in Go 1.2rc4 for
+		// the same problem. See
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. 
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that Walk is about to descend into should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation it is given the current route and router, along with a
+// list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+			continue
+		}
+
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
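+//
+// Editor's note, an illustrative sketch (not upstream documentation): for a
+// route registered with the path /articles/{id}, a handler can read the
+// variable as
+//
+//	id := mux.Vars(r)["id"]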
+func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. 
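+		// Editor's note: canonicalKey is true when matching request headers,
+		// so a matcher key registered as "content-type" is normalized to the
+		// canonical "Content-Type" form used by net/http before lookup.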
+ if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go new file mode 100644 index 00000000..ba47727c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go @@ -0,0 +1,1195 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/context" +) + +type routeTest struct { + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect +} + +func TestHost(t *testing.T) { + // newRequestHost a new request with a method, url, and host header + newRequestHost := func(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req + } + + tests := []routeTest{ + { + title: "Host route match", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with port, match", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, + { + title: "Host route with port, wrong port in request URL", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route, match with host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc:1234"), 
+ request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/a"), + vars: map[string]string{"category": "a"}, + host: "", + path: "/a", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/b/c"), + vars: map[string]string{"category": "b/c"}, + host: "", + path: "/b/c", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/b/c/product_name/1"), + vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, + host: "", + path: "/b/c/product_name/1", + shouldMatch: true, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + tests := []routeTest{ + { + title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + 
request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + title: "PathPrefix route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: 
map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) { + tests := []routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Host and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHeaders(t *testing.T) { + // newRequestHeaders creates a new request with a method, url, and headers + newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + for k, v := range headers { + req.Header.Add(k, v) + } + return req + } + + tests := []routeTest{ + { + title: "Headers route, match", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Headers route, bad header values", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Headers route, regex header values to match", + route: new(Route).Headers("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), + vars: 
map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Headers route, regex header values to match", + route: new(Route).HeadersRegexp("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } + +} + +func TestMethods(t *testing.T) { + tests := []routeTest{ + { + title: "Methods route, match GET", + route: new(Route).Methods("GET", "POST"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, match POST", + route: new(Route).Methods("GET", "POST"), + request: newRequest("POST", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, bad method", + route: new(Route).Methods("GET", "POST"), + request: newRequest("PUT", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestQueries(t *testing.T) { + tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: 
newRequest("GET", "http://localhost?foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?bar=2&foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty value, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with empty value and no parameter in request, should not match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty value and empty parameter in request, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with overlapping value, should not match", + route: new(Route).Queries("foo", "bar"), + request: newRequest("GET", "http://localhost?foo=barfoo"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with no parameter in request, should not match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty parameter in request, should match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{"foo": ""}, + host: "", + path: "", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSchemes(t *testing.T) { + tests := []routeTest{ + // Schemes + { + title: "Schemes route, match https", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "https://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, match ftp", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "ftp://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, bad scheme", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestMatcherFunc(t *testing.T) { + m := func(r *http.Request, m *RouteMatch) bool { + if r.URL.Host == "aaa.bbb.ccc" { 
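+			// Editor's note: this custom matcher accepts only requests whose
+			// URL host is exactly "aaa.bbb.ccc"; all others fall through.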
+ return true + } + return false + } + + tests := []routeTest{ + { + title: "MatchFunc route, match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.bbb.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "MatchFunc route, non-match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.222.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestBuildVarsFunc(t *testing.T) { + tests := []routeTest{ + { + title: "BuildVarsFunc set on route", + route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "3" + vars["v2"] = "a" + return vars + }), + request: newRequest("GET", "http://localhost/111/2"), + path: "/111/3a", + shouldMatch: true, + }, + { + title: "BuildVarsFunc set on route and parent route", + route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "2" + return vars + }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v2"] = "b" + return vars + }), + request: newRequest("GET", "http://localhost/1/a"), + path: "/2/b", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSubRouter(t *testing.T) { + subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() + subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() + + tests := []routeTest{ + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: true, + }, + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: false, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: true, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestNamedRoutes(t *testing.T) { + r1 := NewRouter() + r1.NewRoute().Name("a") + r1.NewRoute().Name("b") + r1.NewRoute().Name("c") + + r2 := r1.NewRoute().Subrouter() + r2.NewRoute().Name("d") + r2.NewRoute().Name("e") + r2.NewRoute().Name("f") + + r3 := r2.NewRoute().Subrouter() + r3.NewRoute().Name("g") + r3.NewRoute().Name("h") + r3.NewRoute().Name("i") + + if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { + t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with 
slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestWalkSingleDepth(t *testing.T) { + r0 := NewRouter() + r1 := NewRouter() + r2 := NewRouter() + + r0.Path("/g") + r0.Path("/o") + r0.Path("/d").Handler(r1) + r0.Path("/r").Handler(r2) + r0.Path("/a") + + r1.Path("/z") + r1.Path("/i") + r1.Path("/l") + r1.Path("/l") + + r2.Path("/i") + r2.Path("/l") + r2.Path("/l") + + paths := []string{"g", "o", "r", "i", "l", "l", "a"} + depths := []int{0, 0, 0, 1, 1, 1, 0} + i := 0 + err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { + matcher := route.matchers[0].(*routeRegexp) + if matcher.template == "/d" { + return SkipRouter + } + if len(ancestors) != depths[i] { + t.Errorf(`Expected depth of %d at i = %d; got "%s"`, depths[i], i, len(ancestors)) + } + if matcher.template != "/"+paths[i] { + t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) + } + i++ + return nil + }) + if err != nil { + panic(err) + } + if i != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), i) + } +} + +func TestWalkNested(t *testing.T) { + router := NewRouter() + + g := router.Path("/g").Subrouter() + o := g.PathPrefix("/o").Subrouter() + r := o.PathPrefix("/r").Subrouter() + i := r.PathPrefix("/i").Subrouter() + l1 := i.PathPrefix("/l").Subrouter() + l2 := l1.PathPrefix("/l").Subrouter() + l2.Path("/a") + + paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} + idx := 0 + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + path := paths[idx] + tpl := route.regexp.path.template + if tpl != path { + t.Errorf(`Expected %s got %s`, path, tpl) + } + idx++ + return nil + }) + if err != nil { + panic(err) + } + if idx != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), idx) + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = 
route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) + if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) + if url != u.Host+u.Path { + t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) + return + } + } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if "http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") + } +} + +// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW +func TestSubrouterHeader(t *testing.T) { + expected 
:= "func1 response" + func1 := func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, expected) + } + func2 := func(http.ResponseWriter, *http.Request) {} + + r := NewRouter() + s := r.Headers("SomeSpecialHeader", "").Subrouter() + s.HandleFunc("/", func1).Name("func1") + r.HandleFunc("/", func2).Name("func2") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + req.Header.Add("SomeSpecialHeader", "foo") + match := new(RouteMatch) + matched := r.Match(req, match) + if !matched { + t.Errorf("Should match request") + } + if match.Route.GetName() != "func1" { + t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) + } + resp := NewRecorder() + match.Handler.ServeHTTP(resp, req) + if resp.Body.String() != expected { + t.Errorf("Expecting %q", expected) + } +} + +// mapToPairs converts a string map to a slice of string pairs +func mapToPairs(m map[string]string) []string { + var i int + p := make([]string, len(m)*2) + for k, v := range m { + p[i] = k + p[i+1] = v + i += 2 + } + return p +} + +// stringMapEqual checks the equality of two string maps +func stringMapEqual(m1, m2 map[string]string) bool { + nil1 := m1 == nil + nil2 := m2 == nil + if nil1 != nil2 || len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if v != m2[k] { + return false + } + } + return true +} + +// newRequest is a helper function to create a new request with a method and url +func newRequest(method, url string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return req +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go new file mode 100644 index 00000000..1f7c190c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go @@ -0,0 +1,714 @@ +// Old tests ported to Go1. This is a mess. Want to drop it one day. + +// Copyright 2011 Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "net/http" + "testing" +) + +// ---------------------------------------------------------------------------- +// ResponseRecorder +// ---------------------------------------------------------------------------- +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. 
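+// Editor's note: mirroring net/http, the first Write sets Code to 200 when
+// WriteHeader has not been called, as the implementation below shows.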
+func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +func TestRouteMatchers(t *testing.T) { + var scheme, host, path, query, method string + var headers map[string]string + var resultVars map[bool]map[string]string + + router := NewRouter() + router.NewRoute().Host("{var1}.google.com"). + Path("/{var2:[a-z]+}/{var3:[0-9]+}"). + Queries("foo", "bar"). + Methods("GET"). + Schemes("https"). + Headers("x-requested-with", "XMLHttpRequest") + router.NewRoute().Host("www.{var4}.com"). + PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). + Queries("baz", "ding"). + Methods("POST"). + Schemes("http"). + Headers("Content-Type", "application/json") + + reset := func() { + // Everything match. + scheme = "https" + host = "www.google.com" + path = "/product/42" + query = "?foo=bar" + method = "GET" + headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} + resultVars = map[bool]map[string]string{ + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, + } + } + + reset2 := func() { + // Everything match. + scheme = "http" + host = "www.google.com" + path = "/foo/product/42/path/that/is/ignored" + query = "?baz=ding" + method = "POST" + headers = map[string]string{"Content-Type": "application/json"} + resultVars = map[bool]map[string]string{ + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, + } + } + + match := func(shouldMatch bool) { + url := scheme + "://" + host + path + query + request, _ := http.NewRequest(method, url, nil) + for key, value := range headers { + request.Header.Add(key, value) + } + + var routeMatch RouteMatch + matched := router.Match(request, &routeMatch) + if matched != shouldMatch { + // Need better messages. :) + if matched { + t.Errorf("Should match.") + } else { + t.Errorf("Should not match.") + } + } + + if matched { + currentRoute := routeMatch.Route + if currentRoute == nil { + t.Errorf("Expected a current route.") + } + vars := routeMatch.Vars + expectedVars := resultVars[shouldMatch] + if len(vars) != len(expectedVars) { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + for name, value := range vars { + if expectedVars[name] != value { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + } + } + } + + // 1st route -------------------------------------------------------------- + + // Everything match. + reset() + match(true) + + // Scheme doesn't match. + reset() + scheme = "http" + match(false) + + // Host doesn't match. + reset() + host = "www.mygoogle.com" + match(false) + + // Path doesn't match. + reset() + path = "/product/notdigits" + match(false) + + // Query doesn't match. + reset() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset() + method = "POST" + match(false) + + // Header doesn't match. + reset() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset() + match(true) + + // 2nd route -------------------------------------------------------------- + + // Everything match. + reset2() + match(true) + + // Scheme doesn't match. + reset2() + scheme = "https" + match(false) + + // Host doesn't match. 
+ reset2() + host = "sub.google.com" + match(false) + + // Path doesn't match. + reset2() + path = "/bar/product/42" + match(false) + + // Query doesn't match. + reset2() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset2() + method = "GET" + match(false) + + // Header doesn't match. + reset2() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = 
[]urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: []string{}, + url: "http://foo.domain.com/articles", + }, + { + route: new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: "http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + 
request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := v.route.URL(v.vars...) + url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. +func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go new file mode 100644 index 00000000..7c636d0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go @@ -0,0 +1,295 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]*" + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if matchQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. 
+ matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.matchQueryString(req) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getUrlQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getUrlQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getUrlQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. 
+ if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/gorilla/mux/route.go new file mode 100644 index 00000000..75481b57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/route.go @@ -0,0 +1,603 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. 
+func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// Alternatively, you can provide a regular expression and match the header as follows: +// +// r.Headers("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will be the same as the previous example, with the addition of matching +// application/text as well.
+// +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// Regular expressions can be used with headers as well. +// It accepts a sequence of key/value pairs, where the value has regex support. For example: +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if the request headers match both regular expressions. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.domain.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// by calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as a request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern.
+// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// by calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined query +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched.
For example: +// +// r := mux.NewRouter() +// s := r.Host("www.domain.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return a url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libcontainer/user/MAINTAINERS rename to Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup.go rename to Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unix.go rename to Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unsupported.go rename to Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go similarity index 93% rename from Godeps/_workspace/src/github.com/docker/libcontainer/user/user.go rename to Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go index 13226dbf..964e31bf 100644 --- a/Godeps/_workspace/src/github.com/docker/libcontainer/user/user.go +++ 
b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go @@ -349,17 +349,12 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( return user, nil } -// GetAdditionalGroupsPath looks up a list of groups by name or group id -// against the group file. If a group name cannot be found, an error will be -// returned. If a group id cannot be found, it will be returned as-is. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - groupReader, err := os.Open(groupPath) - if err != nil { - return nil, fmt.Errorf("Failed to open group file: %v", err) - } - defer groupReader.Close() - - groups, err := ParseGroupFilter(groupReader, func(g Group) bool { +// GetAdditionalGroups looks up a list of groups by name or group id against +// the given /etc/group formatted data. If a group name cannot be found, +// an error will be returned. If a group id cannot be found, it will be returned +// as-is. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + groups, err := ParseGroupFilter(group, func(g Group) bool { for _, ag := range additionalGroups { if g.Name == ag || strconv.Itoa(g.Gid) == ag { return true @@ -405,3 +400,14 @@ } return gids, nil } + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups that opens +// the given groupPath and passes it to GetAdditionalGroups. +func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + group, err := os.Open(groupPath) + if err != nil { + return nil, fmt.Errorf("Failed to open group file: %v", err) + } + defer group.Close() + return GetAdditionalGroups(additionalGroups, group) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/user_test.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user_test.go similarity index 96% rename from Godeps/_workspace/src/github.com/docker/libcontainer/user/user_test.go rename to Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user_test.go index ffb0760e..0e37ac3d 100644 --- a/Godeps/_workspace/src/github.com/docker/libcontainer/user/user_test.go +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user_test.go @@ -1,9 +1,7 @@ package user import ( - "fmt" "io" - "io/ioutil" "reflect" "sort" "strconv" @@ -355,7 +353,7 @@ this is just some garbage data } } -func TestGetAdditionalGroupsPath(t *testing.T) { +func TestGetAdditionalGroups(t *testing.T) { const groupContent = ` root:x:0:root adm:x:43: @@ -419,14 +417,9 @@ this is just some garbage data } for _, test := range tests { - tmpFile, err := ioutil.TempFile("", "get-additional-groups-path") - if err != nil { - t.Error(err) - } - fmt.Fprint(tmpFile, groupContent) - tmpFile.Close() + group := strings.NewReader(groupContent) - gids, err := GetAdditionalGroupsPath(test.groups, tmpFile.Name()) + gids, err := GetAdditionalGroups(test.groups, group) if test.hasError && err == nil { t.Errorf("Parse(%#v) expects error but has none", test) continue diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.dockerignore b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.dockerignore new file mode 100644 index 00000000..222a0284 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.dockerignore @@ -0,0 +1,2 @@ +base-image/build +base-image/cache diff --git
a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.drone.yml b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.drone.yml new file mode 100644 index 00000000..d99f3244 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.drone.yml @@ -0,0 +1,3 @@ +image: rancher/dind:v0.5.0 +script: + - ./scripts/ci diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.gitignore b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.gitignore new file mode 100644 index 00000000..db1dbab7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.gitignore @@ -0,0 +1,7 @@ +/dist +/build +/assets +/base-image/assets/ +/base-image/build/ +/base-image/cache/ +/base-image/dist/ diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.wrap-docker-args b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.wrap-docker-args new file mode 100644 index 00000000..3c1a8ef1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/.wrap-docker-args @@ -0,0 +1 @@ +--privileged diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile new file mode 100644 index 00000000..9710976b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile @@ -0,0 +1,8 @@ +FROM scratch +ADD build/base-files.tar.gz / +COPY build/ca-certificates.crt /usr/etc/ssl/certs/ +COPY build/dockerlaunch /usr/bin/ +COPY build/docker /usr/bin/docker +VOLUME /var/lib/docker +ENTRYPOINT ["/usr/bin/dockerlaunch", "/usr/bin/docker"] +CMD ["-d", "-s", "overlay"] diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile.wrap b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile.wrap new file mode 100644 index 00000000..0b01790e --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Dockerfile.wrap @@ -0,0 +1,3 @@ +FROM rancher/dind:v0.5.0 +WORKDIR /source + diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Godeps.json new file mode 100644 index 00000000..0d2ea0a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Godeps.json @@ -0,0 +1,45 @@ +{ + "ImportPath": "github.com/rancher/docker-from-scratch", + "GoVersion": "go1.4.1", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.8.4", + "Rev": "3cb248e9df77413d58a6330dde84236d04c197d5" + }, + { + "ImportPath": "github.com/docker/docker/pkg/ioutils", + "Comment": "v1.4.1-4890-ga3857cc", + "Rev": "a3857cc3799e2a92b92f07842c03b442490f01ab" + }, + { + "ImportPath": "github.com/docker/docker/pkg/mount", + "Comment": "v1.4.1-4890-ga3857cc", + "Rev": "a3857cc3799e2a92b92f07842c03b442490f01ab" + }, + { + "ImportPath": "github.com/docker/libnetwork/resolvconf", + "Comment": "v0.2-299-gf1c5671", + "Rev": "f1c5671f1ee2133055144e566cd8b3a0ae4f0433" + }, + { + "ImportPath": "github.com/j-keck/arping", + "Rev": "4f4d2c8983a18e2c9c63a3f339bc9a998c4557bc" + }, + { + "ImportPath": "github.com/rancher/netconf", + "Rev": "7dbbacc56c018f16f9123d5ea8cee6e174d44c8b" + }, + { + "ImportPath": "github.com/ryanuber/go-glob", + "Rev": "0067a9abd927e50aed5190662702f81231413ae0" + }, + { + "ImportPath": "github.com/vishvananda/netlink", + "Rev": "ea0402b9dbdee2126f48508a441835ddcabc7d1e" + } + ] +} diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Readme b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/LICENSE b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/LICENSE new file mode 100644 index 00000000..e454a525 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/LICENSE @@ -0,0 +1,178 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/README.md b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/README.md
new file mode 100644
index 00000000..588d751d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/README.md
@@ -0,0 +1,101 @@
+# Docker `FROM scratch`
+
+Docker-in-Docker image based on the empty image `scratch`. Only the bare minimum of files required to make Docker run is included. The image weighs in at around 25MB expanded.
+
+## Running
+
+### Overlay
+
+```bash
+# Daemon
+docker run --name daemon --privileged -d rancher/docker
+
+# Client
+docker exec -it daemon docker ps
+```
+
+### Aufs
+
+```bash
+# Daemon
+docker run --name daemon --privileged -d rancher/docker -d -s aufs
+
+# Client
+docker exec -it daemon docker ps
+```
+
+## Embed in Custom Image
+
+Since docker-from-scratch doesn't assume a base Linux distro, it can easily be copied into another image to add Docker-in-Docker capabilities:
+
+```bash
+docker export $(docker create rancher/docker) > files.tar
+
+cat > Dockerfile << EOF
+
+FROM ubuntu
+ADD files.tar /
+ENTRYPOINT ["/usr/bin/dockerlaunch", "/usr/bin/docker"]
+VOLUME /var/lib/docker
+CMD ["-d", "-s", "overlay"]
+
+EOF
+
+docker build -t custom-dind .
+```
+
+## Graph Driver Compatibility
+
+This image is really designed to run with overlay. Aufs is known to work, but other graph drivers may not work properly or may be missing the userspace programs they need.
+
+
+## Seriously, Why?
+
+This code and the supporting files were extracted out of RancherOS into a separate library and are still used by RancherOS. RancherOS runs Docker as PID 1, but before we can exec Docker we need to set up a minimal environment for it to run in. Since RancherOS is executed by the kernel, there is absolutely nothing set up in the system. At Rancher we wrote a small amount of code to set up all the required mounts and directories needed to launch Docker.
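+
+The sketch below gives a concrete picture of that kind of setup. It is an illustration only, not the actual dockerlaunch code: the exact mounts, options, and cgroup subsystems shown are assumptions about a typical minimal environment.
+
+```bash
+# Illustrative sketch only - not the actual dockerlaunch code.
+# Mount the standard pseudo-filesystems Docker expects.
+mount -t proc proc /proc
+mount -t sysfs sysfs /sys
+mount -t tmpfs tmpfs /run
+
+# Mount the cgroup hierarchies under /sys/fs/cgroup.
+mount -t tmpfs cgroup /sys/fs/cgroup
+for subsys in blkio cpu cpuacct cpuset devices freezer memory; do
+    mkdir -p /sys/fs/cgroup/$subsys
+    mount -t cgroup -o $subsys cgroup /sys/fs/cgroup/$subsys
+done
+```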
+
+We moved this code out into a separate project for two reasons. First, we simply wanted to clean up and modularize the RancherOS code base. Second, we wanted to demonstrate clearly what exactly Docker requires from the Linux user space. For the most part Docker requires the standard mounts (`/proc`, `/sys`, `/run`, `/var/run`, etc.) and the cgroup mounts in `/sys/fs/cgroup`, plus the following programs/files:
+
+
+```
+/etc/ssl/certs/ca-certificates.crt
+/usr/bin/modprobe
+/usr/bin/iptables
+/usr/bin/ssh
+/usr/bin/xz
+/usr/bin/git
+/usr/bin/ps
+/usr/libexec/git-core/git-clone
+/usr/libexec/git-core/git-submodule
+/usr/libexec/git-core/git-checkout
+```
+
+This list can be reduced to a bare minimum if you ignore certain features of Docker. A full description of why each program is needed is below.
+
+File | Description | Can it be ignored?
+-----|-------------|------------------
+`/etc/ssl/certs/ca-certificates.crt` | Used as the CA roots to validate SSL connections | No
+`/usr/bin/modprobe` | Used to ensure that the bridge, nf_nat, br_netfilter, aufs, or overlay modules are loaded. Additionally, iptables loads kernel modules based on the configuration of the rules | Yes, just load the modules that you will need from the host.
+`/usr/bin/iptables` | Docker uses iptables to set up networking | Yes, add `--iptables=false` to the `docker -d` command. Networking will have to be manually configured in this situation
+`/usr/bin/ssh`| Used by git to clone repos over SSH | Yes, don't use git-based Docker builds
+`/usr/bin/xz` | Used to extract *legacy* Docker images that were compressed with xz | Yes, only use newer images. Most popular images are not based on xz
+`/usr/bin/git` | Used to do Docker builds from a git URL | Yes, don't use git-based Docker builds
+`/usr/bin/ps` | `docker ps` uses the host `ps` to get information about the running processes in a container | No
+`/usr/libexec/git-core/git-clone`| Used by git | Yes, don't use git-based Docker builds
+`/usr/libexec/git-core/git-submodule`| Used by git | Yes, don't use git-based Docker builds
+`/usr/libexec/git-core/git-checkout`| Used by git | Yes, don't use git-based Docker builds
+
+## Custom Bridge Name
+
+If you want to run with a custom bridge name you must pass both `--bip` and `-b` as arguments. Normally this would be an error for Docker, but in this situation the docker-from-scratch container will create the bridge device with the IP address specified and then pass `-b` on to Docker.
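+
+For illustration, an invocation under that rule might look like the following; the bridge name and address here are made-up values:
+
+```bash
+# docker-from-scratch creates mybridge0 with 172.18.42.1/16 itself,
+# then hands -b mybridge0 through to the inner Docker daemon.
+docker run --name daemon --privileged -d rancher/docker \
+    -d -b mybridge0 --bip 172.18.42.1/16
+```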
+
+# Troubleshooting
+
+## Weird module loading errors
+
+For various reasons Docker or iptables may try to load a kernel module. You can either manually load all the needed modules from the host, or you can bind mount the host's kernel modules into the container by adding `-v /lib/modules/$(uname -r):/lib/modules/$(uname -r)` to your `docker run` command.
+
+## Debug Logging
+
+To enable debug logging on the startup of docker-from-scratch just add `-e DOCKER_LAUNCH_DEBUG=true` to the `docker run` command. For example:
+
+    docker run --name daemon --privileged -d -e DOCKER_LAUNCH_DEBUG=true rancher/docker
diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.dockerignore b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.dockerignore
new file mode 100644
index 00000000..2dad0d04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.dockerignore
@@ -0,0 +1,5 @@
+build
+dist
+.buildroot-ccache
+.kernel-ccache
+.dl
diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.no-chown b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/.no-chown
new file mode 100644
index 00000000..e69de29b
diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/Dockerfile b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/Dockerfile
new file mode 100644
index 00000000..6c7caaa7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/Dockerfile
@@ -0,0 +1,5 @@
+FROM ubuntu:14.04.2
+COPY ./scripts/bootstrap /scripts/
+RUN ./scripts/bootstrap
+COPY ./scripts/ /source/scripts/
+COPY ./config/ /source/config/
diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/build.sh b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/build.sh
new file mode 100644
index 00000000..8d663ce9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/build.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+set -e
+
+cd $(dirname $0)
+
+IMAGE=${IMAGE:-dockerscratch-files}
+
+docker build -t $IMAGE .
+
+if [ -z "$NO_BIND" ] && [ "$(uname)" == "Linux" ]; then
+    mkdir -p cache
+    ARGS="-v $(pwd):/source -u $(id -u) -e HOME=/root -v $(pwd)/cache:/root"
+fi
+
+ID=$(docker run -itd $ARGS $IMAGE /source/scripts/build)
+trap "docker rm -fv $ID" exit
+
+docker attach $ID
+docker wait $ID
+
+mkdir -p dist
+docker cp $ID:/source/dist/base-files.tar.gz dist
+
+echo Done
diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/buildroot-config-static b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/buildroot-config-static
new file mode 100644
index 00000000..1ccfaffa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/buildroot-config-static
@@ -0,0 +1,2230 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Buildroot 2015.02 Configuration +# +BR2_HAVE_DOT_CONFIG=y + +# +# Target options +# +BR2_ARCH_IS_64=y +# BR2_arcle is not set +# BR2_arceb is not set +# BR2_arm is not set +# BR2_armeb is not set +# BR2_aarch64 is not set +# BR2_bfin is not set +# BR2_i386 is not set +# BR2_microblazeel is not set +# BR2_microblazebe is not set +# BR2_mips is not set +# BR2_mipsel is not set +# BR2_mips64 is not set +# BR2_mips64el is not set +# BR2_nios2 is not set +# BR2_powerpc is not set +# BR2_powerpc64 is not set +# BR2_powerpc64le is not set +# BR2_sh is not set +# BR2_sh64 is not set +# BR2_sparc is not set +BR2_x86_64=y +# BR2_xtensa is not set +BR2_ARCH="x86_64" +BR2_ENDIAN="LITTLE" +BR2_GCC_TARGET_ARCH="nocona" +BR2_ARCH_HAS_ATOMICS=y +BR2_BINFMT_SUPPORTS_SHARED=y +BR2_BINFMT_ELF=y +BR2_X86_CPU_HAS_MMX=y +BR2_X86_CPU_HAS_SSE=y +BR2_X86_CPU_HAS_SSE2=y +BR2_X86_CPU_HAS_SSE3=y +BR2_x86_nocona=y +# BR2_x86_core2 is not set +# BR2_x86_corei7 is not set +# BR2_x86_atom is not set +# BR2_x86_opteron is not set +# BR2_x86_opteron_sse3 is not set +# BR2_x86_barcelona is not set +# BR2_x86_jaguar is not set + +# +# Build options +# + +# +# Commands +# +BR2_WGET="wget --passive-ftp -nd -t 3" +BR2_SVN="svn" +BR2_BZR="bzr" +BR2_GIT="git" +BR2_CVS="cvs" +BR2_LOCALFILES="cp" +BR2_SCP="scp" +BR2_SSH="ssh" +BR2_HG="hg" +BR2_ZCAT="gzip -d -c" +BR2_BZCAT="bzcat" +BR2_XZCAT="xzcat" +BR2_TAR_OPTIONS="" +BR2_DEFCONFIG="$(CONFIG_DIR)/defconfig" +BR2_DL_DIR="$(HOME)/.dl" +BR2_HOST_DIR="$(BASE_DIR)/host" + +# +# Mirrors and Download locations +# +BR2_PRIMARY_SITE="" +BR2_BACKUP_SITE="http://sources.buildroot.net" +BR2_KERNEL_MIRROR="https://www.kernel.org/pub" +BR2_GNU_MIRROR="http://ftp.gnu.org/pub/gnu" +BR2_LUAROCKS_MIRROR="http://rocks.moonscript.org" +BR2_CPAN_MIRROR="http://cpan.metacpan.org" +BR2_JLEVEL=0 +BR2_CCACHE=y +BR2_CCACHE_DIR="$(HOME)/.buildroot-ccache" +BR2_CCACHE_INITIAL_SETUP="" +# BR2_DEPRECATED is not set +# BR2_ENABLE_DEBUG is not set +BR2_STRIP_strip=y +# BR2_STRIP_none is not set +BR2_STRIP_EXCLUDE_FILES="" +BR2_STRIP_EXCLUDE_DIRS="" +# BR2_OPTIMIZE_0 is not set +# BR2_OPTIMIZE_1 is not set +# BR2_OPTIMIZE_2 is not set +# BR2_OPTIMIZE_3 is not set +BR2_OPTIMIZE_S=y + +# +# enabling Stack Smashing Protection requires support in the toolchain +# +BR2_STATIC_LIBS=y +# BR2_SHARED_LIBS is not set +# BR2_SHARED_STATIC_LIBS is not set +BR2_PACKAGE_OVERRIDE_FILE="$(CONFIG_DIR)/local.mk" +BR2_GLOBAL_PATCH_DIR="" + +# +# Advanced +# +# BR2_COMPILER_PARANOID_UNSAFE_PATH is not set + +# +# Toolchain +# +BR2_TOOLCHAIN=y +BR2_TOOLCHAIN_USES_UCLIBC=y +BR2_TOOLCHAIN_BUILDROOT=y +# BR2_TOOLCHAIN_EXTERNAL is not set +BR2_TOOLCHAIN_BUILDROOT_VENDOR="buildroot" + +# +# Kernel Header Options +# +# BR2_KERNEL_HEADERS_3_2 is not set +# BR2_KERNEL_HEADERS_3_4 is not set +# BR2_KERNEL_HEADERS_3_10 is not set +# BR2_KERNEL_HEADERS_3_12 is not set +# BR2_KERNEL_HEADERS_3_14 is not set +# BR2_KERNEL_HEADERS_3_17 is not set +BR2_KERNEL_HEADERS_3_18=y +# BR2_KERNEL_HEADERS_VERSION is not set +BR2_DEFAULT_KERNEL_HEADERS="3.18.6" +BR2_TOOLCHAIN_BUILDROOT_UCLIBC=y + +# +# (e)glibc only available with shared lib support +# +# BR2_TOOLCHAIN_BUILDROOT_MUSL is not set +BR2_TOOLCHAIN_BUILDROOT_LIBC="uclibc" +BR2_PACKAGE_UCLIBC=y + +# +# uClibc Options +# +BR2_UCLIBC_VERSION_0_9_33=y +# BR2_UCLIBC_VERSION_SNAPSHOT is not set +BR2_UCLIBC_VERSION_STRING="0.9.33.2" +BR2_UCLIBC_CONFIG="package/uclibc/uClibc-0.9.33.config" +BR2_TOOLCHAIN_BUILDROOT_LARGEFILE=y +BR2_TOOLCHAIN_BUILDROOT_INET_IPV6=y +# BR2_TOOLCHAIN_BUILDROOT_INET_RPC is not 
set +BR2_TOOLCHAIN_BUILDROOT_WCHAR=y +# BR2_TOOLCHAIN_BUILDROOT_LOCALE is not set +# BR2_PTHREADS_NONE is not set +# BR2_PTHREADS is not set +# BR2_PTHREADS_OLD is not set +BR2_PTHREADS_NATIVE=y +# BR2_PTHREAD_DEBUG is not set +# BR2_TOOLCHAIN_BUILDROOT_USE_SSP is not set +BR2_UCLIBC_INSTALL_UTILS=y +# BR2_UCLIBC_INSTALL_TEST_SUITE is not set +BR2_UCLIBC_TARGET_ARCH="x86_64" + +# +# Binutils Options +# +BR2_BINUTILS_VERSION_2_22=y +# BR2_BINUTILS_VERSION_2_23_2 is not set +# BR2_BINUTILS_VERSION_2_24 is not set +# BR2_BINUTILS_VERSION_2_25 is not set +BR2_BINUTILS_VERSION="2.22" +BR2_BINUTILS_EXTRA_CONFIG_OPTIONS="" + +# +# GCC Options +# +BR2_GCC_NEEDS_MPC=y +BR2_GCC_SUPPORTS_GRAPHITE=y +# BR2_GCC_VERSION_4_5_X is not set +# BR2_GCC_VERSION_4_7_X is not set +BR2_GCC_VERSION_4_8_X=y +# BR2_GCC_VERSION_4_9_X is not set +BR2_GCC_SUPPORTS_FINEGRAINEDMTUNE=y +BR2_GCC_VERSION="4.8.4" +BR2_EXTRA_GCC_CONFIG_OPTIONS="" +# BR2_TOOLCHAIN_BUILDROOT_CXX is not set +BR2_GCC_ENABLE_TLS=y +# BR2_GCC_ENABLE_OPENMP is not set +# BR2_GCC_ENABLE_LIBMUDFLAP is not set +# BR2_GCC_ENABLE_GRAPHITE is not set +# BR2_PACKAGE_HOST_GDB is not set +BR2_LARGEFILE=y +BR2_INET_IPV6=y +BR2_USE_WCHAR=y +BR2_TOOLCHAIN_HAS_THREADS=y +BR2_TOOLCHAIN_HAS_THREADS_NPTL=y +BR2_TOOLCHAIN_HAS_SHADOW_PASSWORDS=y +# BR2_ENABLE_LOCALE_PURGE is not set +BR2_NEEDS_GETTEXT=y +BR2_USE_MMU=y +BR2_TARGET_OPTIMIZATION="-pipe" +BR2_TARGET_LDFLAGS="" +# BR2_ECLIPSE_REGISTER is not set +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_0=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_1=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_2=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_3=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_4=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_5=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_6=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_7=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_8=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_9=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_10=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_11=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_12=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_13=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_14=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_15=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_16=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_17=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST_3_18=y +BR2_TOOLCHAIN_HEADERS_AT_LEAST="3.18" + +# +# System configuration +# +BR2_TARGET_GENERIC_HOSTNAME="buildroot" +BR2_TARGET_GENERIC_ISSUE="Welcome to Buildroot" +# BR2_TARGET_GENERIC_PASSWD_DES is not set +BR2_TARGET_GENERIC_PASSWD_MD5=y +# BR2_TARGET_GENERIC_PASSWD_SHA256 is not set +# BR2_TARGET_GENERIC_PASSWD_SHA512 is not set +BR2_TARGET_GENERIC_PASSWD_METHOD="md5" +BR2_INIT_BUSYBOX=y +# BR2_INIT_SYSV is not set + +# +# systemd needs an (e)glibc toolchain, headers >= 3.7 +# +# BR2_INIT_NONE is not set +# BR2_ROOTFS_DEVICE_CREATION_STATIC is not set +BR2_ROOTFS_DEVICE_CREATION_DYNAMIC_DEVTMPFS=y +# BR2_ROOTFS_DEVICE_CREATION_DYNAMIC_MDEV is not set + +# +# eudev needs a toolchain w/ largefile, wchar, dynamic library, headers >= 3.9 +# +BR2_ROOTFS_DEVICE_TABLE="system/device_table.txt" +BR2_ROOTFS_SKELETON_DEFAULT=y +# BR2_ROOTFS_SKELETON_CUSTOM is not set +BR2_TARGET_GENERIC_ROOT_PASSWD="" +BR2_SYSTEM_BIN_SH_BUSYBOX=y + +# +# bash, dash, zsh need BR2_PACKAGE_BUSYBOX_SHOW_OTHERS +# +# BR2_SYSTEM_BIN_SH_NONE is not set +BR2_SYSTEM_BIN_SH="/bin/busybox" +BR2_TARGET_GENERIC_GETTY=y + +# +# getty options +# +BR2_TARGET_GENERIC_GETTY_PORT="ttyS0" +# BR2_TARGET_GENERIC_GETTY_BAUDRATE_KEEP is not set +# BR2_TARGET_GENERIC_GETTY_BAUDRATE_9600 is not set +# BR2_TARGET_GENERIC_GETTY_BAUDRATE_19200 is not set +# BR2_TARGET_GENERIC_GETTY_BAUDRATE_38400 is 
not set +# BR2_TARGET_GENERIC_GETTY_BAUDRATE_57600 is not set +BR2_TARGET_GENERIC_GETTY_BAUDRATE_115200=y +BR2_TARGET_GENERIC_GETTY_BAUDRATE="115200" +BR2_TARGET_GENERIC_GETTY_TERM="vt100" +BR2_TARGET_GENERIC_GETTY_OPTIONS="" +BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW=y +BR2_SYSTEM_DHCP="" +# BR2_TARGET_TZ_INFO is not set +BR2_ROOTFS_USERS_TABLES="" +BR2_ROOTFS_OVERLAY="" +BR2_ROOTFS_POST_BUILD_SCRIPT="" +BR2_ROOTFS_POST_IMAGE_SCRIPT="" + +# +# Kernel +# +# BR2_LINUX_KERNEL is not set + +# +# Target packages +# +BR2_PACKAGE_BUSYBOX=y +BR2_PACKAGE_BUSYBOX_CONFIG="package/busybox/busybox-ps-modprobe-only.config" +# BR2_PACKAGE_BUSYBOX_SHOW_OTHERS is not set +# BR2_PACKAGE_BUSYBOX_WATCHDOG is not set + +# +# Audio and video applications +# +# BR2_PACKAGE_ALSA_UTILS is not set +# BR2_PACKAGE_AUMIX is not set + +# +# bellagio needs a toolchain w/ C++, threads, dynamic library +# +# BR2_PACKAGE_DVDAUTHOR is not set + +# +# espeak needs a toolchain w/ C++, wchar, threads +# +# BR2_PACKAGE_FAAD2 is not set +# BR2_PACKAGE_FFMPEG is not set +# BR2_PACKAGE_FLAC is not set +# BR2_PACKAGE_FLITE is not set +# BR2_PACKAGE_GSTREAMER is not set +# BR2_PACKAGE_GSTREAMER1 is not set + +# +# jack2 needs a toolchain w/ largefile, threads, C++ +# +BR2_PACKAGE_KODI_ARCH_SUPPORTS=y + +# +# kodi needs a toolchain w/ C++, IPv6, largefile, threads, wchar +# + +# +# kodi needs an OpenGL or an openGL ES and EGL backend +# +# BR2_PACKAGE_LAME is not set +# BR2_PACKAGE_LIBVPX is not set +# BR2_PACKAGE_MADPLAY is not set + +# +# mjpegtools needs a toolchain w/ C++, threads +# + +# +# modplugtools needs a toolchain w/ C++ +# + +# +# mpd needs a toolchain w/ C++, threads, wchar +# +# BR2_PACKAGE_MPD_MPC is not set +# BR2_PACKAGE_MPG123 is not set +# BR2_PACKAGE_MPLAYER is not set +# BR2_PACKAGE_MUSEPACK is not set +# BR2_PACKAGE_NCMPC is not set +# BR2_PACKAGE_OPUS_TOOLS is not set +# BR2_PACKAGE_PULSEAUDIO is not set +# BR2_PACKAGE_SOX is not set +# BR2_PACKAGE_TSTOOLS is not set +# BR2_PACKAGE_TWOLAME is not set + +# +# upmpdcli needs a toolchain w/ C++, largefile, threads +# + +# +# vlc needs a uclibc snapshot or (e)glibc toolchain w/ C++, largefile, wchar, threads +# +# BR2_PACKAGE_VORBIS_TOOLS is not set +# BR2_PACKAGE_WAVPACK is not set +# BR2_PACKAGE_YAVTA is not set +# BR2_PACKAGE_YMPD is not set + +# +# Compressors and decompressors +# +# BR2_PACKAGE_BZIP2 is not set +# BR2_PACKAGE_INFOZIP is not set +# BR2_PACKAGE_LZ4 is not set + +# +# lzip needs a toolchain w/ C++, largefile +# +# BR2_PACKAGE_LZOP is not set +BR2_PACKAGE_XZ=y + +# +# Debugging, profiling and benchmark +# + +# +# blktrace needs a (e)glibc or musl toolchain +# + +# +# bonnie++ needs a toolchain w/ C++ +# +# BR2_PACKAGE_CACHE_CALIBRATOR is not set +# BR2_PACKAGE_DHRYSTONE is not set +# BR2_PACKAGE_DMALLOC is not set +# BR2_PACKAGE_DROPWATCH is not set +# BR2_PACKAGE_DSTAT is not set + +# +# duma needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_FIO is not set + +# +# gdb/gdbserver needs a toolchain w/ threads, threads debug +# +BR2_PACKAGE_GOOGLE_BREAKPAD_ARCH_SUPPORTS=y + +# +# google-breakpad requires an (e)glibc toolchain w/ C++ enabled +# +# BR2_PACKAGE_IOZONE is not set +# BR2_PACKAGE_KEXEC is not set + +# +# ktap needs a Linux kernel to be built +# +# BR2_PACKAGE_LATENCYTOP is not set +# BR2_PACKAGE_LMBENCH is not set + +# +# ltp-testsuite needs a toolchain w/ IPv6, RPC, largefile, threads +# + +# +# ltrace needs toolchain w/ largefile, wchar, dynamic library +# + +# +# lttng-babeltrace needs a uclibc snapshot or (e)glib toolchain w/ wchar, 
threads, largefile +# + +# +# lttng-modules needs a Linux kernel to be built +# +# BR2_PACKAGE_LTTNG_TOOLS is not set +# BR2_PACKAGE_MCELOG is not set +# BR2_PACKAGE_MEMSTAT is not set +# BR2_PACKAGE_NETPERF is not set + +# +# oprofile needs a toolchain w/ C++, wchar +# +# BR2_PACKAGE_PAX_UTILS is not set + +# +# perf needs a toolchain w/ largefile and a Linux kernel to be built +# +# BR2_PACKAGE_PV is not set +# BR2_PACKAGE_RAMSMP is not set +# BR2_PACKAGE_RAMSPEED is not set +# BR2_PACKAGE_RT_TESTS is not set +# BR2_PACKAGE_SPIDEV_TEST is not set +# BR2_PACKAGE_STRACE is not set +# BR2_PACKAGE_STRESS is not set +# BR2_PACKAGE_SYSPROF is not set +# BR2_PACKAGE_TINYMEMBENCH is not set + +# +# trace-cmd needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_TRINITY is not set +# BR2_PACKAGE_VALGRIND is not set +# BR2_PACKAGE_WHETSTONE is not set + +# +# Development tools +# +# BR2_PACKAGE_BINUTILS is not set +# BR2_PACKAGE_BSDIFF is not set + +# +# cppunit needs a toolchain w/ C++, dynamic library +# +# BR2_PACKAGE_CVS is not set +# BR2_PACKAGE_FLEX is not set +# BR2_PACKAGE_GETTEXT is not set +BR2_PACKAGE_GIT=y + +# +# gperf needs a toolchain w/ C++ +# +# BR2_PACKAGE_JQ is not set +# BR2_PACKAGE_LIBTOOL is not set +# BR2_PACKAGE_MAKE is not set +# BR2_PACKAGE_PKGCONF is not set +# BR2_PACKAGE_SSTRIP is not set +# BR2_PACKAGE_TREE is not set +# BR2_PACKAGE_YASM is not set + +# +# Filesystem and flash utilities +# +# BR2_PACKAGE_BTRFS_PROGS is not set +# BR2_PACKAGE_CIFS_UTILS is not set +# BR2_PACKAGE_CRAMFS is not set + +# +# curlftpfs needs a toolchain w/ largefile, wchar, threads, dynamic library +# +# BR2_PACKAGE_DOSFSTOOLS is not set +# BR2_PACKAGE_E2FSPROGS is not set +# BR2_PACKAGE_E2TOOLS is not set + +# +# ecryptfs-utils needs a toolchain w/ largefile, threads, wchar, dynamic library +# + +# +# exfat needs a toolchain w/ largefile, wchar, threads, dynamic library +# +# BR2_PACKAGE_EXFAT_UTILS is not set +# BR2_PACKAGE_F2FS_TOOLS is not set +# BR2_PACKAGE_FLASHBENCH is not set +# BR2_PACKAGE_GENEXT2FS is not set +# BR2_PACKAGE_GENPART is not set +# BR2_PACKAGE_GENROMFS is not set +# BR2_PACKAGE_MAKEDEVS is not set +# BR2_PACKAGE_MMC_UTILS is not set +# BR2_PACKAGE_MTD is not set +# BR2_PACKAGE_MTOOLS is not set +# BR2_PACKAGE_NFS_UTILS is not set +# BR2_PACKAGE_NTFS_3G is not set + +# +# simicsfs needs a Linux kernel to be built +# +# BR2_PACKAGE_SQUASHFS is not set + +# +# sshfs needs a toolchain w/ largefile, wchar, threads, dynamic library +# + +# +# unionfs needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_XFSPROGS is not set + +# +# Games +# +# BR2_PACKAGE_GNUCHESS is not set +# BR2_PACKAGE_LBREAKOUT2 is not set +# BR2_PACKAGE_LTRIS is not set +# BR2_PACKAGE_OPENTYRIAN is not set +# BR2_PACKAGE_PRBOOM is not set + +# +# Graphic libraries and applications (graphic/text) +# + +# +# Graphic applications +# + +# +# fswebcam needs a toolchain w/ dynamic library +# + +# +# glmark2 needs an OpenGL or an openGL ES and EGL backend provided by mesa3d +# + +# +# glmark2 needs udev /dev management and a toolchain w/ C++, largefile, NPTL +# +# BR2_PACKAGE_GNUPLOT is not set +# BR2_PACKAGE_JHEAD is not set +# BR2_PACKAGE_RRDTOOL is not set + +# +# Graphic libraries +# + +# +# cegui06 needs a toolchain w/ C++, threads, dynamic library +# + +# +# directfb needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_FBDUMP is not set +# BR2_PACKAGE_FBGRAB is not set +# BR2_PACKAGE_FB_TEST_APP is not set + +# +# fbterm needs a toolchain w/ C++, wchar, locale 
+# +# BR2_PACKAGE_FBV is not set +# BR2_PACKAGE_IMAGEMAGICK is not set + +# +# linux-fusion needs a Linux kernel to be built +# + +# +# mesa3d needs udev /dev management and a toolchain w/ C++, largefile, NPTL +# + +# +# ocrad needs a toolchain w/ C++ +# +# BR2_PACKAGE_PSPLASH is not set +# BR2_PACKAGE_SDL is not set + +# +# Other GUIs +# +# BR2_PACKAGE_EFL is not set + +# +# qt needs a toolchain w/ C++, threads +# +BR2_PACKAGE_QT5_JSCORE_AVAILABLE=y + +# +# Qt5 needs a toolchain w/ wchar, IPv6, NPTL, C++ +# + +# +# weston needs udev and a toolchain w/ threads, dynamic library, headers >= 3.0 +# + +# +# X.org needs a toolchain w/ wchar, threads, dynamic library +# + +# +# X applications +# + +# +# midori needs libgtk2 and a toolchain w/ C++, wchar, threads +# + +# +# X libraries and helper libraries +# +# BR2_PACKAGE_DEJAVU is not set +# BR2_PACKAGE_GHOSTSCRIPT_FONTS is not set +# BR2_PACKAGE_LIBERATION is not set +# BR2_PACKAGE_XKEYBOARD_CONFIG is not set + +# +# X window managers +# + +# +# Hardware handling +# + +# +# Firmware +# +# BR2_PACKAGE_B43_FIRMWARE is not set +# BR2_PACKAGE_LINUX_FIRMWARE is not set +# BR2_PACKAGE_UX500_FIRMWARE is not set +# BR2_PACKAGE_ZD1211_FIRMWARE is not set +# BR2_PACKAGE_ACPID is not set + +# +# avrdude needs a toolchain w/ threads, largefile, wchar, dynamic library +# + +# +# bcache-tools needs udev /dev management and a toolchain w/ largefile, wchar +# +# BR2_PACKAGE_CDRKIT is not set + +# +# cryptsetup needs a toolchain w/ largefile, wchar, threads, dynamic library +# +# BR2_PACKAGE_DBUS is not set +# BR2_PACKAGE_DMIDECODE is not set + +# +# dmraid needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_DTV_SCAN_TABLES is not set +# BR2_PACKAGE_DVB_APPS is not set +# BR2_PACKAGE_DVBSNOOP is not set +# BR2_PACKAGE_EEPROG is not set + +# +# eudev needs eudev /dev management +# + +# +# eudev needs a toolchain w/ largefile, wchar, dynamic library +# +# BR2_PACKAGE_EVEMU is not set +# BR2_PACKAGE_EVTEST is not set +# BR2_PACKAGE_FAN_CTRL is not set +# BR2_PACKAGE_FCONFIG is not set +# BR2_PACKAGE_FIS is not set +# BR2_PACKAGE_FLASHROM is not set +# BR2_PACKAGE_FMTOOLS is not set +# BR2_PACKAGE_FXLOAD is not set +# BR2_PACKAGE_GADGETFS_TEST is not set + +# +# gpm mouse server needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_GPSD is not set + +# +# gptfdisk needs a toolchain w/ largefile, wchar, C++ +# +# BR2_PACKAGE_GVFS is not set +# BR2_PACKAGE_HWDATA is not set +# BR2_PACKAGE_I2C_TOOLS is not set +# BR2_PACKAGE_INPUT_EVENT_DAEMON is not set +# BR2_PACKAGE_INPUT_TOOLS is not set +# BR2_PACKAGE_INTEL_MICROCODE is not set +# BR2_PACKAGE_IOSTAT is not set +# BR2_PACKAGE_IPMITOOL is not set +# BR2_PACKAGE_IRDA_UTILS is not set +# BR2_PACKAGE_IUCODE_TOOL is not set +# BR2_PACKAGE_KBD is not set +# BR2_PACKAGE_LCDPROC is not set +# BR2_PACKAGE_LM_SENSORS is not set + +# +# lshw needs a toolchain w/ C++, largefile, wchar +# +# BR2_PACKAGE_LSUIO is not set + +# +# lvm2 needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_MDADM is not set +# BR2_PACKAGE_MEDIA_CTL is not set +# BR2_PACKAGE_MEMTEST86 is not set +# BR2_PACKAGE_MEMTESTER is not set +# BR2_PACKAGE_MINICOM is not set +# BR2_PACKAGE_NANOCOM is not set +# BR2_PACKAGE_NEARD is not set +# BR2_PACKAGE_OFONO is not set + +# +# ola needs a toolchain w/ C++, threads, largefile, wchar +# +# BR2_PACKAGE_OPEN2300 is not set +# BR2_PACKAGE_OPENOCD is not set + +# +# openpowerlink needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_PARTED is not set +# 
BR2_PACKAGE_PCIUTILS is not set +# BR2_PACKAGE_PICOCOM is not set +# BR2_PACKAGE_PPS_TOOLS is not set +# BR2_PACKAGE_READ_EDID is not set +# BR2_PACKAGE_RNG_TOOLS is not set +# BR2_PACKAGE_SANE_BACKENDS is not set +# BR2_PACKAGE_SDPARM is not set +# BR2_PACKAGE_SETSERIAL is not set +# BR2_PACKAGE_SG3_UTILS is not set +# BR2_PACKAGE_SIGROK_CLI is not set +# BR2_PACKAGE_SISPMCTL is not set + +# +# smartmontools needs a toolchain w/ C++ +# +# BR2_PACKAGE_SMSTOOLS3 is not set + +# +# snowball-hdmiservice needs a toolchain w/ threads, dynamic library +# +# BR2_PACKAGE_SREDIRD is not set +# BR2_PACKAGE_STATSERIAL is not set +# BR2_PACKAGE_SYSSTAT is not set + +# +# targetcli-fb depends on Python +# +# BR2_PACKAGE_TI_UIM is not set +# BR2_PACKAGE_TI_UTILS is not set +# BR2_PACKAGE_TRIGGERHAPPY is not set +# BR2_PACKAGE_UBOOT_TOOLS is not set + +# +# udisks needs udev /dev management +# + +# +# udisks needs a toolchain w/ wchar, threads, dynamic library +# +# BR2_PACKAGE_USB_MODESWITCH is not set +# BR2_PACKAGE_USB_MODESWITCH_DATA is not set + +# +# usbmount requires udev to be enabled +# + +# +# usbutils needs udev /dev management and toolchain w/ threads +# +# BR2_PACKAGE_W_SCAN is not set +# BR2_PACKAGE_WIPE is not set +# BR2_PACKAGE_XORRISO is not set + +# +# Interpreter languages and scripting +# +# BR2_PACKAGE_ENSCRIPT is not set + +# +# erlang needs a toolchain w/ threads, shared library +# +# BR2_PACKAGE_GUILE is not set +# BR2_PACKAGE_HASERL is not set +# BR2_PACKAGE_JAMVM is not set +# BR2_PACKAGE_JIMTCL is not set +# BR2_PACKAGE_LUA is not set +# BR2_PACKAGE_LUAJIT is not set +BR2_PACKAGE_MONO_ARCH_SUPPORTS=y +# BR2_PACKAGE_MONO is not set + +# +# nodejs needs a toolchain w/ C++, IPv6, largefile, threads +# +# BR2_PACKAGE_PERL is not set +# BR2_PACKAGE_PHP is not set +# BR2_PACKAGE_PYTHON is not set +# BR2_PACKAGE_PYTHON3 is not set + +# +# ruby needs a toolchain w/ wchar, threads, dynamic library +# +# BR2_PACKAGE_TCL is not set + +# +# Libraries +# + +# +# Audio/Sound +# +# BR2_PACKAGE_ALSA_LIB is not set + +# +# audiofile needs a toolchain w/ C++ +# +# BR2_PACKAGE_CELT051 is not set + +# +# fdk-aac needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBAO is not set +# BR2_PACKAGE_LIBCDAUDIO is not set +# BR2_PACKAGE_LIBCDIO is not set +# BR2_PACKAGE_LIBCUE is not set +# BR2_PACKAGE_LIBCUEFILE is not set +# BR2_PACKAGE_LIBID3TAG is not set +# BR2_PACKAGE_LIBLO is not set +# BR2_PACKAGE_LIBMAD is not set + +# +# libmodplug needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBMPD is not set +# BR2_PACKAGE_LIBMPDCLIENT is not set +# BR2_PACKAGE_LIBREPLAYGAIN is not set +# BR2_PACKAGE_LIBSAMPLERATE is not set +# BR2_PACKAGE_LIBSNDFILE is not set +# BR2_PACKAGE_LIBSOXR is not set +# BR2_PACKAGE_LIBVORBIS is not set + +# +# mp4v2 needs a toolchain w/ C++ +# + +# +# opencore-amr needs a toolchain w/ C++ +# +# BR2_PACKAGE_OPUS is not set +# BR2_PACKAGE_PORTAUDIO is not set +# BR2_PACKAGE_SPEEX is not set + +# +# taglib needs a toolchain w/ C++, wchar +# + +# +# tinyalsa needs toolchain w/ dynamic library +# +# BR2_PACKAGE_TREMOR is not set +# BR2_PACKAGE_VO_AACENC is not set + +# +# webrtc-audio-processing needs a toolchain w/ C++, threads +# + +# +# Compression and decompression +# +# BR2_PACKAGE_LIBARCHIVE is not set +# BR2_PACKAGE_LIBZIP is not set +# BR2_PACKAGE_LZO is not set + +# +# snappy needs a toolchain w/ C++ +# +# BR2_PACKAGE_SZIP is not set +BR2_PACKAGE_ZLIB=y + +# +# Crypto +# +# BR2_PACKAGE_BEECRYPT is not set +BR2_PACKAGE_BOTAN_ARCH_SUPPORTS=y + +# +# botan needs a toolchain w/ C++, threads 
+# +# BR2_PACKAGE_CA_CERTIFICATES is not set + +# +# cryptodev needs a Linux kernel to be built +# +# BR2_PACKAGE_GNUTLS is not set +# BR2_PACKAGE_LIBASSUAN is not set +# BR2_PACKAGE_LIBGCRYPT is not set +# BR2_PACKAGE_LIBGPG_ERROR is not set +# BR2_PACKAGE_LIBGPGME is not set +# BR2_PACKAGE_LIBKSBA is not set +# BR2_PACKAGE_LIBMCRYPT is not set +# BR2_PACKAGE_LIBMHASH is not set + +# +# libnss needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_LIBSECRET is not set +# BR2_PACKAGE_LIBSHA1 is not set +# BR2_PACKAGE_LIBSSH2 is not set +# BR2_PACKAGE_NETTLE is not set +BR2_PACKAGE_OPENSSL=y +# BR2_PACKAGE_OPENSSL_BIN is not set +# BR2_PACKAGE_OPENSSL_ENGINES is not set +# BR2_PACKAGE_POLARSSL is not set + +# +# Database +# +# BR2_PACKAGE_BERKELEYDB is not set +# BR2_PACKAGE_GDBM is not set + +# +# MySQL needs a toolchain w/ C++, threads +# + +# +# postgresql needs a toolchain w/ glibc +# +# BR2_PACKAGE_REDIS is not set +# BR2_PACKAGE_SQLCIPHER is not set +# BR2_PACKAGE_SQLITE is not set + +# +# Filesystem +# +# BR2_PACKAGE_GAMIN is not set +# BR2_PACKAGE_LIBCONFIG is not set +# BR2_PACKAGE_LIBCONFUSE is not set + +# +# libfuse needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_LIBLOCKFILE is not set + +# +# libnfs needs a toolchain w/ RPC and LARGEFILE +# +# BR2_PACKAGE_LIBSYSFS is not set +# BR2_PACKAGE_LOCKDEV is not set + +# +# Graphics +# +# BR2_PACKAGE_ATK is not set +# BR2_PACKAGE_CAIRO is not set + +# +# exiv2 needs a toolchain w/ C++, dynamic library +# +# BR2_PACKAGE_FONTCONFIG is not set +# BR2_PACKAGE_FREETYPE is not set +# BR2_PACKAGE_GD is not set +# BR2_PACKAGE_GDK_PIXBUF is not set + +# +# giblib support needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_GIFLIB is not set + +# +# harfbuzz needs a toolchain w/ C++ +# +# BR2_PACKAGE_HICOLOR_ICON_THEME is not set + +# +# imlib2 needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_JASPER is not set +# BR2_PACKAGE_JPEG is not set +# BR2_PACKAGE_LCMS2 is not set +# BR2_PACKAGE_LIBART is not set +# BR2_PACKAGE_LIBDMTX is not set +# BR2_PACKAGE_LIBDRM is not set +# BR2_PACKAGE_LIBEXIF is not set +# BR2_PACKAGE_LIBGEOTIFF is not set + +# +# libglew depends on X.org and needs an OpenGL backend +# + +# +# libglu needs an OpenGL backend +# + +# +# libgtk3 needs a toolchain w/ wchar, threads, C++ +# +# BR2_PACKAGE_LIBPNG is not set +# BR2_PACKAGE_LIBQRENCODE is not set + +# +# libraw needs a toolchain w/ C++ +# + +# +# librsvg needs a toolchain w/ wchar, threads, C++ +# +# BR2_PACKAGE_LIBSVG is not set +# BR2_PACKAGE_LIBSVG_CAIRO is not set +# BR2_PACKAGE_LIBSVGTINY is not set +# BR2_PACKAGE_LIBUNGIF is not set + +# +# libva needs a toolchain w/ largefile, threads, dynamic library +# + +# +# libva intel driver needs a toolchain w/ largefile, threads, dynamic library +# + +# +# libvips needs a toolchain w/ wchar, threads, C++ +# + +# +# opencv needs a toolchain w/ C++, NPTL, wchar +# + +# +# pango needs a toolchain w/ wchar, threads, C++ +# +# BR2_PACKAGE_PIXMAN is not set + +# +# poppler needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_TIFF is not set + +# +# wayland needs a toolchain w/ threads, dynamic library +# +BR2_PACKAGE_WEBKIT_ARCH_SUPPORTS=y + +# +# webkit needs libgtk2 and a toolchain w/ C++, wchar, threads +# +# BR2_PACKAGE_WEBP is not set + +# +# zxing needs a toolchain w/ C++ +# + +# +# Hardware handling +# + +# +# ccid needs a toolchain w/ threads, dynamic library +# + +# +# dtc needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_GNU_EFI is not set + +# +# lcdapi needs 
a toolchain w/ C++, threads +# +# BR2_PACKAGE_LIBAIO is not set + +# +# libatasmart requires udev to be enabled +# + +# +# libcec needs a toolchain w/ C++, wchar, threads, dynamic library +# +# BR2_PACKAGE_LIBFREEFARE is not set +# BR2_PACKAGE_LIBFTDI is not set +# BR2_PACKAGE_LIBHID is not set +# BR2_PACKAGE_LIBIIO is not set + +# +# libinput needs udev /dev management +# +# BR2_PACKAGE_LIBIQRF is not set +# BR2_PACKAGE_LIBLLCP is not set + +# +# libmbim needs udev /dev management and a toolchain w/ wchar, threads +# +# BR2_PACKAGE_LIBNFC is not set +# BR2_PACKAGE_LIBPCIACCESS is not set +# BR2_PACKAGE_LIBQMI is not set +# BR2_PACKAGE_LIBRAW1394 is not set +# BR2_PACKAGE_LIBRTLSDR is not set + +# +# libserial needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBSERIALPORT is not set +# BR2_PACKAGE_LIBSIGROK is not set +# BR2_PACKAGE_LIBSIGROKDECODE is not set +# BR2_PACKAGE_LIBSOC is not set +# BR2_PACKAGE_LIBUSB is not set + +# +# libv4l needs a toolchain w/ largefile, threads and C++, headers >= 3.0 +# +# BR2_PACKAGE_LIBXKBCOMMON is not set +# BR2_PACKAGE_MTDEV is not set +# BR2_PACKAGE_NEARDAL is not set + +# +# pcsc-lite needs a toolchain w/ threads, dynamic library +# + +# +# tslib needs a toolchain w/ dynamic library +# + +# +# urg needs a toolchain w/ C++ +# + +# +# Javascript +# +# BR2_PACKAGE_BOOTSTRAP is not set +# BR2_PACKAGE_EXPLORERCANVAS is not set +# BR2_PACKAGE_FLOT is not set +# BR2_PACKAGE_JQUERY is not set +# BR2_PACKAGE_JQUERY_KEYBOARD is not set +# BR2_PACKAGE_JQUERY_MOBILE is not set +# BR2_PACKAGE_JQUERY_SPARKLINE is not set +# BR2_PACKAGE_JQUERY_UI is not set +# BR2_PACKAGE_JQUERY_VALIDATION is not set +# BR2_PACKAGE_JSMIN is not set +# BR2_PACKAGE_JSON_JAVASCRIPT is not set + +# +# JSON/XML +# + +# +# benejson needs a toolchain w/ C++ +# +# BR2_PACKAGE_EXPAT is not set +# BR2_PACKAGE_EZXML is not set +# BR2_PACKAGE_JANSSON is not set +# BR2_PACKAGE_JSON_C is not set +# BR2_PACKAGE_JSON_GLIB is not set + +# +# libjson needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBROXML is not set +# BR2_PACKAGE_LIBXML2 is not set + +# +# libxml++ needs a toolchain w/ C++, wchar, threads +# +# BR2_PACKAGE_LIBXMLRPC is not set +# BR2_PACKAGE_LIBXSLT is not set +# BR2_PACKAGE_LIBYAML is not set +# BR2_PACKAGE_MXML is not set +# BR2_PACKAGE_RAPIDJSON is not set + +# +# tinyxml needs a toolchain w/ C++ +# + +# +# xerces-c++ needs a toolchain w/ C++, wchar +# +# BR2_PACKAGE_YAJL is not set + +# +# yaml-cpp needs a toolchain w/ C++, largefile, threads +# + +# +# Logging +# +# BR2_PACKAGE_LIBLOG4C_LOCALTIME is not set +# BR2_PACKAGE_LIBLOGGING is not set + +# +# log4cplus needs a toolchain w/ C++, wchar, threads +# + +# +# log4cxx needs a toolchain w/ C++, threads, dynamic library +# + +# +# zlog needs a toolchain w/ threads, largefile, dynamic library +# + +# +# Multimedia +# +# BR2_PACKAGE_LIBASS is not set +# BR2_PACKAGE_LIBDVBCSA is not set + +# +# libdvbsi++ needs a toolchain w/ C++, wchar, threads +# + +# +# libdvdnav needs a toolchain w/ dynamic library, largefile, threads +# + +# +# libdvdread needs a toolchain w/ dynamic library, largefile +# + +# +# libebml needs a toolchain w/ C++ +# + +# +# libmatroska needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBMMS is not set +# BR2_PACKAGE_LIBMPEG2 is not set +# BR2_PACKAGE_LIBOGG is not set +# BR2_PACKAGE_LIBPLAYER is not set +# BR2_PACKAGE_LIBTHEORA is not set + +# +# live555 needs a toolchain w/ C++ +# + +# +# mediastreamer needs a toolchain w/ threads, C++, IPv6 +# +# BR2_PACKAGE_X264 is not set + +# +# Networking +# + +# +# agent++ needs a 
toolchain w/ threads, C++, dynamic library +# +# BR2_PACKAGE_C_ARES is not set +BR2_PACKAGE_CANFESTIVAL_ARCH_SUPPORTS=y + +# +# canfestival needs a toolchain w/ threads and dynamic library +# +# BR2_PACKAGE_CGIC is not set + +# +# cppzmq needs a toolchain w/ C++, IPv6, largefile, wchar, threads +# + +# +# czmq needs a toolchain w/ C++, IPv6, largefile, wchar, threads +# + +# +# filemq needs a toolchain w/ C++, IPv6, largefile, wchar, threads +# +# BR2_PACKAGE_FLICKCURL is not set +# BR2_PACKAGE_FREERADIUS_CLIENT is not set +# BR2_PACKAGE_GEOIP is not set +# BR2_PACKAGE_GLIB_NETWORKING is not set +# BR2_PACKAGE_LIBCGI is not set + +# +# libcgicc needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBCURL is not set +# BR2_PACKAGE_LIBDNET is not set +# BR2_PACKAGE_LIBEXOSIP2 is not set +# BR2_PACKAGE_LIBFCGI is not set +# BR2_PACKAGE_LIBGSASL is not set +# BR2_PACKAGE_LIBIDN is not set +# BR2_PACKAGE_LIBISCSI is not set +# BR2_PACKAGE_LIBMBUS is not set + +# +# libmemcached needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_LIBMICROHTTPD is not set +# BR2_PACKAGE_LIBMNL is not set +# BR2_PACKAGE_LIBMODBUS is not set +# BR2_PACKAGE_LIBNDP is not set +# BR2_PACKAGE_LIBNICE is not set +# BR2_PACKAGE_LIBNETFILTER_ACCT is not set +# BR2_PACKAGE_LIBNETFILTER_CONNTRACK is not set +# BR2_PACKAGE_LIBNETFILTER_CTHELPER is not set +# BR2_PACKAGE_LIBNETFILTER_CTTIMEOUT is not set +# BR2_PACKAGE_LIBNETFILTER_LOG is not set +# BR2_PACKAGE_LIBNETFILTER_QUEUE is not set +# BR2_PACKAGE_LIBNFNETLINK is not set +# BR2_PACKAGE_LIBNFTNL is not set +# BR2_PACKAGE_LIBNL is not set +# BR2_PACKAGE_LIBOAUTH is not set +# BR2_PACKAGE_LIBOPING is not set +# BR2_PACKAGE_LIBOSIP2 is not set +# BR2_PACKAGE_LIBPCAP is not set +# BR2_PACKAGE_LIBRSYNC is not set + +# +# libshairplay needs a toolchain w/ IPv6, threads, dynamic library +# +# BR2_PACKAGE_LIBSHOUT is not set +# BR2_PACKAGE_LIBSOCKETCAN is not set +# BR2_PACKAGE_LIBSOUP is not set +# BR2_PACKAGE_LIBSRTP is not set +# BR2_PACKAGE_LIBSTROPHE is not set +# BR2_PACKAGE_LIBTIRPC is not set + +# +# libtorrent needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_LIBUPNP is not set + +# +# libupnpp needs a toolchain w/ C++, largefile, threads +# +# BR2_PACKAGE_LIBURIPARSER is not set +# BR2_PACKAGE_LIBVNCSERVER is not set +# BR2_PACKAGE_LIBWEBSOCK is not set +# BR2_PACKAGE_LIBWEBSOCKETS is not set +# BR2_PACKAGE_NEON is not set + +# +# omniORB needs a toolchain w/ C++, threads +# +# BR2_PACKAGE_OPENLDAP is not set +# BR2_PACKAGE_OPENPGM is not set +# BR2_PACKAGE_ORTP is not set +# BR2_PACKAGE_RTMPDUMP is not set +# BR2_PACKAGE_SLIRP is not set + +# +# snmp++ needs a toolchain w/ threads, C++, dynamic library +# +# BR2_PACKAGE_SOFIA_SIP is not set + +# +# thrift needs a toolchain w/ C++, largefile, wchar, threads +# +# BR2_PACKAGE_USBREDIR is not set + +# +# wvstreams needs a toolchain w/ C++, largefile +# + +# +# zeromq needs a toolchain w/ C++, IPv6, largefile, wchar, threads +# + +# +# zmqpp needs a toolchain w/ C++, IPv6, largefile, wchar, threads +# + +# +# zyre needs a toolchain w/ C++, IPv6, largefile, wchar, threads +# + +# +# Other +# + +# +# apr needs a toolchain w/ dynamic library +# + +# +# apr-util needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_ARGP_STANDALONE is not set + +# +# armadillo needs a toolchain w/ C++, largefile +# +BR2_PACKAGE_BDWGC_ARCH_SUPPORTS=y +# BR2_PACKAGE_BDWGC is not set + +# +# boost needs a toolchain w/ C++, largefile, threads +# +# BR2_PACKAGE_CLAPACK is not set + +# +# cppcms needs a toolchain w/ C++, NPTL, wchar, dynamic library +# 
+# BR2_PACKAGE_DING_LIBS is not set + +# +# eigen needs a toolchain w/ C++ +# + +# +# elfutils needs a toolchain w/ largefile, wchar, dynamic library +# +# BR2_PACKAGE_FFTW is not set + +# +# flann needs a toolchain w/ C++, dynamic library +# + +# +# glibmm needs a toolchain w/ C++, wchar, threads +# + +# +# glm needs a toolchain w/ C++ +# +# BR2_PACKAGE_GMP is not set +# BR2_PACKAGE_GSL is not set + +# +# gtest needs a toolchain w/ C++, wchar, threads +# +# BR2_PACKAGE_LIBARGTABLE2 is not set +BR2_PACKAGE_LIBATOMIC_ARCH_SUPPORTS=y +# BR2_PACKAGE_LIBATOMIC_OPS is not set +BR2_PACKAGE_LIBBSD_ARCH_SUPPORTS=y + +# +# libbsd needs an (e)glibc toolchain w/ threads +# +# BR2_PACKAGE_LIBCAP is not set +# BR2_PACKAGE_LIBCAP_NG is not set + +# +# libcgroup needs an (e)glibc toolchain w/ C++ +# +# BR2_PACKAGE_LIBDAEMON is not set +# BR2_PACKAGE_LIBEE is not set +# BR2_PACKAGE_LIBEV is not set +# BR2_PACKAGE_LIBEVDEV is not set +# BR2_PACKAGE_LIBEVENT is not set +# BR2_PACKAGE_LIBFFI is not set +# BR2_PACKAGE_LIBGLIB2 is not set +# BR2_PACKAGE_LIBICAL is not set + +# +# liblinear needs a toolchain w/ C++ +# +BR2_PACKAGE_LIBNSPR_ARCH_SUPPORT=y +# BR2_PACKAGE_LIBNSPR is not set +# BR2_PACKAGE_LIBPFM4 is not set + +# +# libplist needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBPTHREAD_STUBS is not set +# BR2_PACKAGE_LIBPTHSEM is not set +# BR2_PACKAGE_LIBSECCOMP is not set + +# +# libsigc++ needs a toolchain w/ C++ +# +# BR2_PACKAGE_LIBSIGSEGV is not set +# BR2_PACKAGE_LIBTASN1 is not set +# BR2_PACKAGE_LIBTPL is not set + +# +# libubox needs a toolchain w/ dynamic library +# + +# +# libuci needs a toolchain w/ dynamic library +# + +# +# libunwind needs a uclibc snapshot or (e)glibc toolchain w/ threads +# +# BR2_PACKAGE_LIBURCU is not set +# BR2_PACKAGE_LIBUV is not set +# BR2_PACKAGE_LIGHTNING is not set + +# +# linux-pam needs a toolchain w/ wchar, locale, dynamic library +# +# BR2_PACKAGE_LTTNG_LIBUST is not set +# BR2_PACKAGE_MPC is not set +# BR2_PACKAGE_MPDECIMAL is not set +# BR2_PACKAGE_MPFR is not set + +# +# msgpack needs a toolchain w/ C++ +# +# BR2_PACKAGE_MTDEV2TUIO is not set +# BR2_PACKAGE_ORC is not set + +# +# p11-kit needs a toolchain w/ threads, dynamic library +# + +# +# poco needs a toolchain w/ wchar, threads, C++ +# + +# +# protobuf needs a toolchain w/ C++, threads +# + +# +# protobuf-c needs a toolchain w/ C++, threads +# + +# +# qhull needs a toolchain w/ C++, dynamic library +# + +# +# qlibc needs a toolchain w/ threads, wchar, dynamic library +# + +# +# Security +# + +# +# libselinux needs a toolchain w/ largefile, threads, dynamic library +# + +# +# libsemanage needs a toolchain w/ largefile, threads, dynamic library +# +# BR2_PACKAGE_LIBSEPOL is not set + +# +# Text and terminal handling +# + +# +# enchant needs a toolchain w/ C++, threads, wchar +# + +# +# icu needs a toolchain w/ C++, wchar, threads +# +# BR2_PACKAGE_LIBCLI is not set +# BR2_PACKAGE_LIBEDIT is not set +# BR2_PACKAGE_LIBENCA is not set +# BR2_PACKAGE_LIBESTR is not set +# BR2_PACKAGE_LIBFRIBIDI is not set +BR2_PACKAGE_LIBICONV=y +# BR2_PACKAGE_LIBUNISTRING is not set +# BR2_PACKAGE_LINENOISE is not set +# BR2_PACKAGE_NCURSES is not set +# BR2_PACKAGE_NEWT is not set +# BR2_PACKAGE_PCRE is not set +# BR2_PACKAGE_POPT is not set +# BR2_PACKAGE_READLINE is not set +# BR2_PACKAGE_SLANG is not set + +# +# tclap needs a toolchain w/ C++ +# +# BR2_PACKAGE_USTR is not set + +# +# Mail +# +# BR2_PACKAGE_DOVECOT is not set +# BR2_PACKAGE_EXIM is not set +# BR2_PACKAGE_FETCHMAIL is not set + +# +# heirloom-mailx needs 
a toolchain w/ dynamic library +# + +# +# libesmtp needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_MSMTP is not set +# BR2_PACKAGE_MUTT is not set + +# +# Miscellaneous +# +# BR2_PACKAGE_AESPIPE is not set +# BR2_PACKAGE_BC is not set +# BR2_PACKAGE_CLAMAV is not set + +# +# collectd needs a toolchain w/ IPv6, threads, dynamic library +# +# BR2_PACKAGE_EMPTY is not set +# BR2_PACKAGE_GOOGLEFONTDIRECTORY is not set +# BR2_PACKAGE_HAVEGED is not set +# BR2_PACKAGE_MCRYPT is not set +# BR2_PACKAGE_MOBILE_BROADBAND_PROVIDER_INFO is not set +# BR2_PACKAGE_QEMU is not set +# BR2_PACKAGE_SHARED_MIME_INFO is not set + +# +# snowball-init needs a toolchain w/ wchar, threads, dynamic library +# +# BR2_PACKAGE_SOUND_THEME_BOREALIS is not set +# BR2_PACKAGE_SOUND_THEME_FREEDESKTOP is not set + +# +# Networking applications +# +# BR2_PACKAGE_AICCU is not set +# BR2_PACKAGE_AIRCRACK_NG is not set +# BR2_PACKAGE_ARGUS is not set +# BR2_PACKAGE_ARPTABLES is not set +# BR2_PACKAGE_ATFTP is not set +# BR2_PACKAGE_AUTOSSH is not set +# BR2_PACKAGE_AXEL is not set +# BR2_PACKAGE_BANDWIDTHD is not set + +# +# bcusdk needs a toolchain w/ C++ +# + +# +# bind needs a toolchain w/ largefile, IPv6, dynamic library +# + +# +# bluez-utils needs a toolchain w/ wchar, threads, dynamic library +# + +# +# bluez5-utils needs a toolchain w/ wchar, threads, IPv6, headers >= 3.4, dynamic library +# +# BR2_PACKAGE_BMON is not set +# BR2_PACKAGE_BOA is not set +# BR2_PACKAGE_BRIDGE_UTILS is not set +# BR2_PACKAGE_BWM_NG is not set +# BR2_PACKAGE_CAN_UTILS is not set +# BR2_PACKAGE_CHRONY is not set +# BR2_PACKAGE_CIVETWEB is not set + +# +# connman needs a toolchain w/ IPv6, wchar, threads, resolver, dynamic library +# +# BR2_PACKAGE_CONNTRACK_TOOLS is not set + +# +# crda needs a toolchain w/ threads, dynamic library +# + +# +# ctorrent needs a toolchain w/ C++ +# +# BR2_PACKAGE_CUPS is not set +# BR2_PACKAGE_DHCPCD is not set +# BR2_PACKAGE_DHCPDUMP is not set +# BR2_PACKAGE_DNSMASQ is not set +# BR2_PACKAGE_DROPBEAR is not set +# BR2_PACKAGE_EBTABLES is not set + +# +# ejabberd needs erlang +# +# BR2_PACKAGE_ETHTOOL is not set + +# +# faifa needs a toolchain w/ dynamic library, threads +# +# BR2_PACKAGE_FPING is not set +# BR2_PACKAGE_GESFTPSERVER is not set +# BR2_PACKAGE_HIAWATHA is not set +# BR2_PACKAGE_HOSTAPD is not set +# BR2_PACKAGE_HTTPING is not set +# BR2_PACKAGE_IFTOP is not set + +# +# igh-ethercat needs a Linux kernel to be built +# +# BR2_PACKAGE_IGMPPROXY is not set + +# +# inadyn needs a toolchain w/ dynamic library +# + +# +# iperf needs a toolchain w/ C++ +# +# BR2_PACKAGE_IPERF3 is not set +# BR2_PACKAGE_IPROUTE2 is not set +# BR2_PACKAGE_IPSEC_TOOLS is not set +# BR2_PACKAGE_IPSET is not set +BR2_PACKAGE_IPTABLES=y +# BR2_PACKAGE_IPTRAF_NG is not set +# BR2_PACKAGE_IPUTILS is not set +# BR2_PACKAGE_IW is not set +# BR2_PACKAGE_JANUS_GATEWAY is not set + +# +# kismet needs a toolchain w/ threads, C++ +# +# BR2_PACKAGE_KNOCK is not set +# BR2_PACKAGE_LEAFNODE2 is not set + +# +# lftp requires a toolchain w/ C++, wchar +# + +# +# lighttpd needs a toolchain w/ dynamic library +# + +# +# linknx needs a toolchain w/ C++ +# +# BR2_PACKAGE_LINKS is not set + +# +# linphone needs a toolchain w/ threads, C++, IPv6 +# +# BR2_PACKAGE_LINUX_ZIGBEE is not set +# BR2_PACKAGE_LRZSZ is not set +# BR2_PACKAGE_MACCHANGER is not set +# BR2_PACKAGE_MEMCACHED is not set +# BR2_PACKAGE_MII_DIAG is not set +# BR2_PACKAGE_MINIDLNA is not set + +# +# mjpg-streamer needs a toolchain w/ threads, headers >= 3.0, dynamic 
library +# + +# +# modemmanager needs udev /dev management and a toolchain w/ largefile, wchar, threads, IPv6 +# +# BR2_PACKAGE_MONGOOSE is not set +# BR2_PACKAGE_MROUTED is not set +# BR2_PACKAGE_MTR is not set +# BR2_PACKAGE_NBD is not set +# BR2_PACKAGE_NCFTP is not set +# BR2_PACKAGE_NDISC6 is not set +# BR2_PACKAGE_NETATALK is not set +# BR2_PACKAGE_NETPLUG is not set +# BR2_PACKAGE_NETSNMP is not set +# BR2_PACKAGE_NETSTAT_NAT is not set + +# +# NetworkManager needs udev /dev management and a toolchain w/ IPv6, largefile, wchar, threads, headers >= 3.7 +# +# BR2_PACKAGE_NFACCT is not set +# BR2_PACKAGE_NFTABLES is not set +# BR2_PACKAGE_NGINX is not set +# BR2_PACKAGE_NGIRCD is not set +# BR2_PACKAGE_NGREP is not set + +# +# nmap needs a toolchain w/ C++, IPv6, threads +# +# BR2_PACKAGE_NOIP is not set +# BR2_PACKAGE_NTP is not set +# BR2_PACKAGE_NUTTCP is not set +# BR2_PACKAGE_ODHCP6C is not set +# BR2_PACKAGE_ODHCPLOC is not set +# BR2_PACKAGE_OLSR is not set +# BR2_PACKAGE_OPENNTPD is not set +# BR2_PACKAGE_OPENOBEX is not set +BR2_PACKAGE_OPENSSH=y +# BR2_PACKAGE_OPENSWAN is not set +# BR2_PACKAGE_OPENVPN is not set +# BR2_PACKAGE_P910ND is not set + +# +# portmap needs a toolchain w/ RPC +# + +# +# pppd needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_PPTP_LINUX is not set +# BR2_PACKAGE_PROFTPD is not set + +# +# proxychains-ng needs a toolchain w/ threads, dynamic library +# +# BR2_PACKAGE_PTPD is not set +# BR2_PACKAGE_PTPD2 is not set +# BR2_PACKAGE_PURE_FTPD is not set +# BR2_PACKAGE_QUAGGA is not set +# BR2_PACKAGE_RADVD is not set +# BR2_PACKAGE_RPCBIND is not set +# BR2_PACKAGE_RSH_REDONE is not set +# BR2_PACKAGE_RSYNC is not set + +# +# rtorrent needs a toolchain w/ C++, threads, wchar +# +# BR2_PACKAGE_RTPTOOLS is not set +# BR2_PACKAGE_SAMBA is not set +# BR2_PACKAGE_SAMBA4 is not set + +# +# sconeserver needs a toolchain w/ C++, NPTL +# +# BR2_PACKAGE_SER2NET is not set +# BR2_PACKAGE_SHAIRPORT_SYNC is not set +# BR2_PACKAGE_SMCROUTE is not set +# BR2_PACKAGE_SOCAT is not set +# BR2_PACKAGE_SOCKETCAND is not set +# BR2_PACKAGE_SPAWN_FCGI is not set + +# +# spice server depends on python (for pyparsing) +# +# BR2_PACKAGE_SPICE_PROTOCOL is not set + +# +# squid needs a toolchain w/ C++, IPv6, headers >= 3.0 +# +# BR2_PACKAGE_SSHPASS is not set +# BR2_PACKAGE_STRONGSWAN is not set +# BR2_PACKAGE_STUNNEL is not set +# BR2_PACKAGE_TCPDUMP is not set +# BR2_PACKAGE_TCPING is not set +# BR2_PACKAGE_TCPREPLAY is not set +# BR2_PACKAGE_THTTPD is not set +# BR2_PACKAGE_TINC is not set +# BR2_PACKAGE_TINYHTTPD is not set +# BR2_PACKAGE_TN5250 is not set +# BR2_PACKAGE_TRANSMISSION is not set +# BR2_PACKAGE_TVHEADEND is not set +# BR2_PACKAGE_UDPCAST is not set + +# +# ushare needs a toolchain w/ largefile, threads, dynamic library +# + +# +# ussp-push needs a toolchain w/ wchar, IPv6, threads, dynamic library +# + +# +# vde2 needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_VNSTAT is not set +# BR2_PACKAGE_VPNC is not set +# BR2_PACKAGE_VSFTPD is not set +# BR2_PACKAGE_VTUN is not set +# BR2_PACKAGE_WIRELESS_REGDB is not set +# BR2_PACKAGE_WIRELESS_TOOLS is not set +# BR2_PACKAGE_WIRESHARK is not set +# BR2_PACKAGE_WPA_SUPPLICANT is not set + +# +# wvdial needs a toolchain w/ C++, largefile +# +# BR2_PACKAGE_XINETD is not set +# BR2_PACKAGE_XL2TP is not set + +# +# xtables-addons needs a Linux kernel to be built +# + +# +# xtables-addons needs a toolchain w/ dynamic library, largefile, threads +# + +# +# znc needs a toolchain w/ C++ +# + +# +# Package managers +# 
+# BR2_PACKAGE_IPKG is not set +# BR2_PACKAGE_OPKG is not set + +# +# Real-Time +# +# BR2_PACKAGE_XENOMAI is not set + +# +# Shell and utilities +# + +# +# Shells +# + +# +# Utilities +# +# BR2_PACKAGE_AT is not set +# BR2_PACKAGE_CCRYPT is not set +# BR2_PACKAGE_DIALOG is not set +# BR2_PACKAGE_DTACH is not set +# BR2_PACKAGE_FILE is not set +# BR2_PACKAGE_GNUPG is not set + +# +# gnupg2 needs a toolchain w/ dynamic library +# +# BR2_PACKAGE_INOTIFY_TOOLS is not set +# BR2_PACKAGE_LOCKFILE_PROGS is not set +# BR2_PACKAGE_LOGROTATE is not set +# BR2_PACKAGE_LOGSURFER is not set +# BR2_PACKAGE_PINENTRY is not set +# BR2_PACKAGE_SCREEN is not set +# BR2_PACKAGE_SUDO is not set +# BR2_PACKAGE_TMUX is not set +# BR2_PACKAGE_XMLSTARLET is not set + +# +# System tools +# +# BR2_PACKAGE_ACL is not set +# BR2_PACKAGE_ATTR is not set +# BR2_PACKAGE_CPULOAD is not set +# BR2_PACKAGE_FTOP is not set +# BR2_PACKAGE_GETENT is not set +# BR2_PACKAGE_HTOP is not set +# BR2_PACKAGE_IPRUTILS is not set +# BR2_PACKAGE_IRQBALANCE is not set +# BR2_PACKAGE_KEYUTILS is not set +# BR2_PACKAGE_LXC is not set +# BR2_PACKAGE_MONIT is not set +# BR2_PACKAGE_NCDU is not set +# BR2_PACKAGE_NUMACTL is not set + +# +# nut needs a toolchain w/ C++ +# + +# +# openvmtools needs a toolchain w/ wchar, threads, RPC, largefile, locale +# +# BR2_PACKAGE_POLKIT is not set +# BR2_PACKAGE_PWGEN is not set +# BR2_PACKAGE_QUOTA is not set + +# +# smack needs a toolchain w/ dynamic library, threads, headers >= 3.0 +# + +# +# supervisor needs the python interpreter +# +BR2_PACKAGE_SYSTEMD_ARCH_SUPPORTS=y +BR2_PACKAGE_UTIL_LINUX=y +BR2_PACKAGE_UTIL_LINUX_LIBBLKID=y +# BR2_PACKAGE_UTIL_LINUX_LIBMOUNT is not set +# BR2_PACKAGE_UTIL_LINUX_LIBSMARTCOLS is not set +BR2_PACKAGE_UTIL_LINUX_LIBUUID=y +# BR2_PACKAGE_UTIL_LINUX_BINARIES is not set + +# +# Text editors and viewers +# +# BR2_PACKAGE_ED is not set +# BR2_PACKAGE_JOE is not set +# BR2_PACKAGE_NANO is not set +# BR2_PACKAGE_UEMACS is not set + +# +# Filesystem images +# +# BR2_TARGET_ROOTFS_CLOOP is not set +# BR2_TARGET_ROOTFS_CPIO is not set +# BR2_TARGET_ROOTFS_CRAMFS is not set +# BR2_TARGET_ROOTFS_EXT2 is not set + +# +# initramfs needs a Linux kernel to be built +# + +# +# iso image needs a Linux kernel to be built +# +# BR2_TARGET_ROOTFS_JFFS2 is not set +# BR2_TARGET_ROOTFS_ROMFS is not set +# BR2_TARGET_ROOTFS_SQUASHFS is not set +BR2_TARGET_ROOTFS_TAR=y +BR2_TARGET_ROOTFS_TAR_NONE=y +# BR2_TARGET_ROOTFS_TAR_GZIP is not set +# BR2_TARGET_ROOTFS_TAR_BZIP2 is not set +# BR2_TARGET_ROOTFS_TAR_LZMA is not set +# BR2_TARGET_ROOTFS_TAR_LZO is not set +# BR2_TARGET_ROOTFS_TAR_XZ is not set +BR2_TARGET_ROOTFS_TAR_OPTIONS="" +# BR2_TARGET_ROOTFS_UBIFS is not set +# BR2_TARGET_ROOTFS_YAFFS2 is not set + +# +# Bootloaders +# +# BR2_TARGET_BAREBOX is not set +# BR2_TARGET_GRUB is not set +# BR2_TARGET_GRUB2 is not set +# BR2_TARGET_GUMMIBOOT is not set +# BR2_TARGET_SYSLINUX is not set +# BR2_TARGET_UBOOT is not set + +# +# Host utilities +# +# BR2_PACKAGE_HOST_CHECKPOLICY is not set +# BR2_PACKAGE_HOST_CRAMFS is not set +# BR2_PACKAGE_HOST_DFU_UTIL is not set +# BR2_PACKAGE_HOST_DOS2UNIX is not set +# BR2_PACKAGE_HOST_DOSFSTOOLS is not set +# BR2_PACKAGE_HOST_E2FSPROGS is not set +# BR2_PACKAGE_HOST_E2TOOLS is not set +# BR2_PACKAGE_HOST_GENEXT2FS is not set +# BR2_PACKAGE_HOST_GENIMAGE is not set +# BR2_PACKAGE_HOST_GENPART is not set +# BR2_PACKAGE_HOST_LPC3250LOADER is not set +# BR2_PACKAGE_HOST_MKE2IMG is not set +# BR2_PACKAGE_HOST_MTD is not set +# BR2_PACKAGE_HOST_MTOOLS is 
not set +# BR2_PACKAGE_HOST_OPENOCD is not set +# BR2_PACKAGE_HOST_PARTED is not set +# BR2_PACKAGE_HOST_PATCHELF is not set +# BR2_PACKAGE_HOST_PWGEN is not set +# BR2_PACKAGE_HOST_QEMU is not set +# BR2_PACKAGE_HOST_SAM_BA is not set +# BR2_PACKAGE_HOST_SQUASHFS is not set +# BR2_PACKAGE_HOST_UBOOT_TOOLS is not set +# BR2_PACKAGE_HOST_UTIL_LINUX is not set + +# +# Legacy config options +# + +# +# Legacy options removed in 2015.02 +# +# BR2_PACKAGE_LIBGC is not set +# BR2_PACKAGE_WDCTL is not set +# BR2_PACKAGE_RPM_BZIP2_PAYLOADS is not set +# BR2_PACKAGE_RPM_XZ_PAYLOADS is not set +# BR2_PACKAGE_M4 is not set +# BR2_PACKAGE_FLEX_BINARY is not set +# BR2_PACKAGE_BISON is not set +# BR2_PACKAGE_GOB2 is not set +# BR2_PACKAGE_DISTCC is not set +# BR2_PACKAGE_HASERL_VERSION_0_8_X is not set +# BR2_PACKAGE_STRONGSWAN_TOOLS is not set +# BR2_PACKAGE_XBMC_ADDON_XVDR is not set +# BR2_PACKAGE_XBMC_PVR_ADDONS is not set +# BR2_PACKAGE_XBMC is not set +# BR2_PACKAGE_XBMC_ALSA_LIB is not set +# BR2_PACKAGE_XBMC_AVAHI is not set +# BR2_PACKAGE_XBMC_DBUS is not set +# BR2_PACKAGE_XBMC_LIBBLURAY is not set +# BR2_PACKAGE_XBMC_GOOM is not set +# BR2_PACKAGE_XBMC_RSXS is not set +# BR2_PACKAGE_XBMC_LIBCEC is not set +# BR2_PACKAGE_XBMC_LIBMICROHTTPD is not set +# BR2_PACKAGE_XBMC_LIBNFS is not set +# BR2_PACKAGE_XBMC_RTMPDUMP is not set +# BR2_PACKAGE_XBMC_LIBSHAIRPLAY is not set +# BR2_PACKAGE_XBMC_LIBSMBCLIENT is not set +# BR2_PACKAGE_XBMC_LIBTHEORA is not set +# BR2_PACKAGE_XBMC_LIBUSB is not set +# BR2_PACKAGE_XBMC_LIBVA is not set +# BR2_PACKAGE_XBMC_WAVPACK is not set +# BR2_PREFER_STATIC_LIB is not set + +# +# Legacy options removed in 2014.11 +# +# BR2_x86_generic is not set +# BR2_GCC_VERSION_4_4_X is not set +# BR2_sparc_sparchfleon is not set +# BR2_sparc_sparchfleonv8 is not set +# BR2_sparc_sparcsfleon is not set +# BR2_sparc_sparcsfleonv8 is not set +# BR2_PACKAGE_LINUX_FIRMWARE_XC5000 is not set +# BR2_PACKAGE_LINUX_FIRMWARE_CXGB4 is not set +# BR2_PACKAGE_LINUX_FIRMWARE_IWLWIFI_3160_7260_7 is not set +# BR2_PACKAGE_LINUX_FIRMWARE_IWLWIFI_3160_7260_8 is not set + +# +# Legacy options removed in 2014.08 +# +# BR2_PACKAGE_LIBELF is not set +# BR2_KERNEL_HEADERS_3_8 is not set +# BR2_PACKAGE_GETTEXT_TOOLS is not set +# BR2_PACKAGE_PROCPS is not set +# BR2_BINUTILS_VERSION_2_20_1 is not set +# BR2_BINUTILS_VERSION_2_21 is not set +# BR2_BINUTILS_VERSION_2_23_1 is not set +# BR2_UCLIBC_VERSION_0_9_32 is not set +# BR2_GCC_VERSION_4_3_X is not set +# BR2_GCC_VERSION_4_6_X is not set +# BR2_GDB_VERSION_7_4 is not set +# BR2_GDB_VERSION_7_5 is not set +# BR2_BUSYBOX_VERSION_1_19_X is not set +# BR2_BUSYBOX_VERSION_1_20_X is not set +# BR2_BUSYBOX_VERSION_1_21_X is not set +# BR2_PACKAGE_LIBV4L_DECODE_TM6000 is not set +# BR2_PACKAGE_LIBV4L_IR_KEYTABLE is not set +# BR2_PACKAGE_LIBV4L_V4L2_COMPLIANCE is not set +# BR2_PACKAGE_LIBV4L_V4L2_CTL is not set +# BR2_PACKAGE_LIBV4L_V4L2_DBG is not set + +# +# Legacy options removed in 2014.05 +# +# BR2_PACKAGE_EVTEST_CAPTURE is not set +# BR2_KERNEL_HEADERS_3_6 is not set +# BR2_KERNEL_HEADERS_3_7 is not set +# BR2_PACKAGE_VALA is not set +BR2_PACKAGE_TZDATA_ZONELIST="" +# BR2_PACKAGE_LUA_INTERPRETER_EDITING_NONE is not set +# BR2_PACKAGE_LUA_INTERPRETER_READLINE is not set +# BR2_PACKAGE_LUA_INTERPRETER_LINENOISE is not set +# BR2_PACKAGE_DVB_APPS_UTILS is not set +# BR2_KERNEL_HEADERS_SNAP is not set +# BR2_ROOTFS_DEVICE_CREATION_DYNAMIC_UDEV is not set +# BR2_PACKAGE_UDEV is not set +# BR2_PACKAGE_UDEV_RULES_GEN is not set +# 
BR2_PACKAGE_UDEV_ALL_EXTRAS is not set + +# +# Legacy options removed in 2014.02 +# +# BR2_sh2 is not set +# BR2_sh3 is not set +# BR2_sh3eb is not set +# BR2_KERNEL_HEADERS_3_1 is not set +# BR2_KERNEL_HEADERS_3_3 is not set +# BR2_KERNEL_HEADERS_3_5 is not set +# BR2_GDB_VERSION_7_2 is not set +# BR2_GDB_VERSION_7_3 is not set +# BR2_PACKAGE_CCACHE is not set +# BR2_HAVE_DOCUMENTATION is not set +# BR2_PACKAGE_AUTOMAKE is not set +# BR2_PACKAGE_AUTOCONF is not set +# BR2_PACKAGE_XSTROKE is not set +# BR2_PACKAGE_LZMA is not set +# BR2_PACKAGE_TTCP is not set +# BR2_PACKAGE_LIBNFC_LLCP is not set +# BR2_PACKAGE_MYSQL_CLIENT is not set +# BR2_PACKAGE_SQUASHFS3 is not set +# BR2_TARGET_ROOTFS_SQUASHFS3 is not set +# BR2_PACKAGE_NETKITBASE is not set +# BR2_PACKAGE_NETKITTELNET is not set +# BR2_PACKAGE_LUASQL is not set +# BR2_PACKAGE_LUACJSON is not set + +# +# Legacy options removed in 2013.11 +# +# BR2_PACKAGE_LVM2_DMSETUP_ONLY is not set +# BR2_PACKAGE_QT_JAVASCRIPTCORE is not set +# BR2_PACKAGE_MODULE_INIT_TOOLS is not set +BR2_TARGET_UBOOT_CUSTOM_GIT_REPO_URL="" +BR2_TARGET_UBOOT_CUSTOM_GIT_VERSION="" +BR2_LINUX_KERNEL_CUSTOM_GIT_REPO_URL="" +BR2_LINUX_KERNEL_CUSTOM_GIT_VERSION="" + +# +# Legacy options removed in 2013.08 +# +# BR2_ARM_OABI is not set +# BR2_PACKAGE_DOSFSTOOLS_DOSFSCK is not set +# BR2_PACKAGE_DOSFSTOOLS_DOSFSLABEL is not set +# BR2_PACKAGE_DOSFSTOOLS_MKDOSFS is not set +# BR2_ELF2FLT is not set +# BR2_VFP_FLOAT is not set +# BR2_PACKAGE_GCC_TARGET is not set +# BR2_HAVE_DEVFILES is not set + +# +# Legacy options removed in 2013.05 +# +# BR2_PACKAGE_LINUX_FIRMWARE_RTL_8192 is not set +# BR2_PACKAGE_LINUX_FIRMWARE_RTL_8712 is not set + +# +# Legacy options removed in 2013.02 +# +# BR2_sa110 is not set +# BR2_sa1100 is not set +# BR2_PACKAGE_GDISK is not set +# BR2_PACKAGE_GDISK_GDISK is not set +# BR2_PACKAGE_GDISK_SGDISK is not set +# BR2_PACKAGE_GDB_HOST is not set +# BR2_PACKAGE_DIRECTB_DITHER_RGB16 is not set +# BR2_PACKAGE_DIRECTB_TESTS is not set + +# +# Legacy options removed in 2012.11 +# +# BR2_PACKAGE_CUSTOMIZE is not set +# BR2_PACKAGE_XSERVER_xorg is not set +# BR2_PACKAGE_XSERVER_tinyx is not set +# BR2_PACKAGE_PTHREAD_STUBS is not set + +# +# Legacy options removed in 2012.08 +# +# BR2_PACKAGE_GETTEXT_STATIC is not set +# BR2_PACKAGE_LIBINTL is not set +# BR2_PACKAGE_INPUT_TOOLS_EVTEST is not set +# BR2_BFIN_FDPIC is not set +# BR2_BFIN_FLAT is not set diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/busybox-ps-modprobe-only.config b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/busybox-ps-modprobe-only.config new file mode 100644 index 00000000..94fc81c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/config/busybox-ps-modprobe-only.config @@ -0,0 +1,1002 @@ +# +# Automatically generated make config: don't edit +# Busybox version: 1.22.1 +# Mon Feb 16 05:30:21 2015 +# +CONFIG_HAVE_DOT_CONFIG=y + +# +# Busybox Settings +# + +# +# General Configuration +# +CONFIG_DESKTOP=y +# CONFIG_EXTRA_COMPAT is not set +CONFIG_INCLUDE_SUSv2=y +# CONFIG_USE_PORTABLE_CODE is not set +CONFIG_PLATFORM_LINUX=y +CONFIG_FEATURE_BUFFERS_USE_MALLOC=y +# CONFIG_FEATURE_BUFFERS_GO_ON_STACK is not set +# CONFIG_FEATURE_BUFFERS_GO_IN_BSS is not set +CONFIG_SHOW_USAGE=y +CONFIG_FEATURE_VERBOSE_USAGE=y +# CONFIG_FEATURE_COMPRESS_USAGE is not set +CONFIG_FEATURE_INSTALLER=y +# CONFIG_INSTALL_NO_USR is not set +# CONFIG_LOCALE_SUPPORT is not set +# 
CONFIG_UNICODE_SUPPORT is not set +# CONFIG_UNICODE_USING_LOCALE is not set +# CONFIG_FEATURE_CHECK_UNICODE_IN_ENV is not set +CONFIG_SUBST_WCHAR=0 +CONFIG_LAST_SUPPORTED_WCHAR=0 +# CONFIG_UNICODE_COMBINING_WCHARS is not set +# CONFIG_UNICODE_WIDE_WCHARS is not set +# CONFIG_UNICODE_BIDI_SUPPORT is not set +# CONFIG_UNICODE_NEUTRAL_TABLE is not set +# CONFIG_UNICODE_PRESERVE_BROKEN is not set +CONFIG_LONG_OPTS=y +CONFIG_FEATURE_DEVPTS=y +CONFIG_FEATURE_CLEAN_UP=y +CONFIG_FEATURE_UTMP=y +CONFIG_FEATURE_WTMP=y +# CONFIG_FEATURE_PIDFILE is not set +CONFIG_PID_FILE_PATH="" +CONFIG_FEATURE_SUID=y +# CONFIG_FEATURE_SUID_CONFIG is not set +# CONFIG_FEATURE_SUID_CONFIG_QUIET is not set +# CONFIG_SELINUX is not set +# CONFIG_FEATURE_PREFER_APPLETS is not set +CONFIG_BUSYBOX_EXEC_PATH="/proc/self/exe" +# CONFIG_FEATURE_SYSLOG is not set +# CONFIG_FEATURE_HAVE_RPC is not set + +# +# Build Options +# +# CONFIG_STATIC is not set +# CONFIG_PIE is not set +# CONFIG_NOMMU is not set +# CONFIG_BUILD_LIBBUSYBOX is not set +# CONFIG_FEATURE_INDIVIDUAL is not set +# CONFIG_FEATURE_SHARED_BUSYBOX is not set +CONFIG_LFS=y +CONFIG_CROSS_COMPILER_PREFIX="" +CONFIG_SYSROOT="" +CONFIG_EXTRA_CFLAGS="" +CONFIG_EXTRA_LDFLAGS="" +CONFIG_EXTRA_LDLIBS="" + +# +# Debugging Options +# +# CONFIG_DEBUG is not set +# CONFIG_DEBUG_PESSIMIZE is not set +# CONFIG_WERROR is not set +CONFIG_NO_DEBUG_LIB=y +# CONFIG_DMALLOC is not set +# CONFIG_EFENCE is not set + +# +# Installation Options ("make install" behavior) +# +CONFIG_INSTALL_APPLET_SYMLINKS=y +# CONFIG_INSTALL_APPLET_HARDLINKS is not set +# CONFIG_INSTALL_APPLET_SCRIPT_WRAPPERS is not set +# CONFIG_INSTALL_APPLET_DONT is not set +# CONFIG_INSTALL_SH_APPLET_SYMLINK is not set +# CONFIG_INSTALL_SH_APPLET_HARDLINK is not set +# CONFIG_INSTALL_SH_APPLET_SCRIPT_WRAPPER is not set +CONFIG_PREFIX="./_install" + +# +# Busybox Library Tuning +# +# CONFIG_FEATURE_SYSTEMD is not set +CONFIG_FEATURE_RTMINMAX=y +CONFIG_PASSWORD_MINLEN=6 +CONFIG_MD5_SMALL=1 +CONFIG_SHA3_SMALL=1 +# CONFIG_FEATURE_FAST_TOP is not set +# CONFIG_FEATURE_ETC_NETWORKS is not set +# CONFIG_FEATURE_USE_TERMIOS is not set +CONFIG_FEATURE_EDITING=y +CONFIG_FEATURE_EDITING_MAX_LEN=1024 +CONFIG_FEATURE_EDITING_VI=y +CONFIG_FEATURE_EDITING_HISTORY=999 +CONFIG_FEATURE_EDITING_SAVEHISTORY=y +# CONFIG_FEATURE_EDITING_SAVE_ON_EXIT is not set +CONFIG_FEATURE_REVERSE_SEARCH=y +CONFIG_FEATURE_TAB_COMPLETION=y +# CONFIG_FEATURE_USERNAME_COMPLETION is not set +CONFIG_FEATURE_EDITING_FANCY_PROMPT=y +# CONFIG_FEATURE_EDITING_ASK_TERMINAL is not set +CONFIG_FEATURE_NON_POSIX_CP=y +# CONFIG_FEATURE_VERBOSE_CP_MESSAGE is not set +CONFIG_FEATURE_COPYBUF_KB=4 +CONFIG_FEATURE_SKIP_ROOTFS=y +CONFIG_MONOTONIC_SYSCALL=y +CONFIG_IOCTL_HEX2STR_ERROR=y +CONFIG_FEATURE_HWIB=y + +# +# Applets +# + +# +# Archival Utilities +# +# CONFIG_FEATURE_SEAMLESS_XZ is not set +# CONFIG_FEATURE_SEAMLESS_LZMA is not set +# CONFIG_FEATURE_SEAMLESS_BZ2 is not set +# CONFIG_FEATURE_SEAMLESS_GZ is not set +# CONFIG_FEATURE_SEAMLESS_Z is not set +# CONFIG_AR is not set +# CONFIG_FEATURE_AR_LONG_FILENAMES is not set +# CONFIG_FEATURE_AR_CREATE is not set +# CONFIG_UNCOMPRESS is not set +# CONFIG_GUNZIP is not set +# CONFIG_BUNZIP2 is not set +# CONFIG_UNLZMA is not set +# CONFIG_FEATURE_LZMA_FAST is not set +# CONFIG_LZMA is not set +# CONFIG_UNXZ is not set +# CONFIG_XZ is not set +# CONFIG_BZIP2 is not set +# CONFIG_CPIO is not set +# CONFIG_FEATURE_CPIO_O is not set +# CONFIG_FEATURE_CPIO_P is not set +# CONFIG_DPKG is not set +# CONFIG_DPKG_DEB is not 
set +# CONFIG_FEATURE_DPKG_DEB_EXTRACT_ONLY is not set +# CONFIG_GZIP is not set +# CONFIG_FEATURE_GZIP_LONG_OPTIONS is not set +CONFIG_GZIP_FAST=0 +# CONFIG_LZOP is not set +# CONFIG_LZOP_COMPR_HIGH is not set +# CONFIG_RPM is not set +# CONFIG_RPM2CPIO is not set +# CONFIG_TAR is not set +# CONFIG_FEATURE_TAR_CREATE is not set +# CONFIG_FEATURE_TAR_AUTODETECT is not set +# CONFIG_FEATURE_TAR_FROM is not set +# CONFIG_FEATURE_TAR_OLDGNU_COMPATIBILITY is not set +# CONFIG_FEATURE_TAR_OLDSUN_COMPATIBILITY is not set +# CONFIG_FEATURE_TAR_GNU_EXTENSIONS is not set +# CONFIG_FEATURE_TAR_LONG_OPTIONS is not set +# CONFIG_FEATURE_TAR_TO_COMMAND is not set +# CONFIG_FEATURE_TAR_UNAME_GNAME is not set +# CONFIG_FEATURE_TAR_NOPRESERVE_TIME is not set +# CONFIG_FEATURE_TAR_SELINUX is not set +# CONFIG_UNZIP is not set + +# +# Coreutils +# +# CONFIG_BASENAME is not set +# CONFIG_CAT is not set +# CONFIG_DATE is not set +# CONFIG_FEATURE_DATE_ISOFMT is not set +# CONFIG_FEATURE_DATE_NANO is not set +# CONFIG_FEATURE_DATE_COMPAT is not set +# CONFIG_HOSTID is not set +# CONFIG_ID is not set +# CONFIG_GROUPS is not set +# CONFIG_TEST is not set +# CONFIG_FEATURE_TEST_64 is not set +# CONFIG_TOUCH is not set +# CONFIG_FEATURE_TOUCH_NODEREF is not set +# CONFIG_FEATURE_TOUCH_SUSV3 is not set +# CONFIG_TR is not set +# CONFIG_FEATURE_TR_CLASSES is not set +# CONFIG_FEATURE_TR_EQUIV is not set +# CONFIG_BASE64 is not set +# CONFIG_WHO is not set +# CONFIG_USERS is not set +# CONFIG_CAL is not set +# CONFIG_CATV is not set +# CONFIG_CHGRP is not set +# CONFIG_CHMOD is not set +# CONFIG_CHOWN is not set +# CONFIG_FEATURE_CHOWN_LONG_OPTIONS is not set +# CONFIG_CHROOT is not set +# CONFIG_CKSUM is not set +# CONFIG_COMM is not set +# CONFIG_CP is not set +# CONFIG_FEATURE_CP_LONG_OPTIONS is not set +# CONFIG_CUT is not set +# CONFIG_DD is not set +# CONFIG_FEATURE_DD_SIGNAL_HANDLING is not set +# CONFIG_FEATURE_DD_THIRD_STATUS_LINE is not set +# CONFIG_FEATURE_DD_IBS_OBS is not set +# CONFIG_DF is not set +# CONFIG_FEATURE_DF_FANCY is not set +# CONFIG_DIRNAME is not set +# CONFIG_DOS2UNIX is not set +# CONFIG_UNIX2DOS is not set +# CONFIG_DU is not set +# CONFIG_FEATURE_DU_DEFAULT_BLOCKSIZE_1K is not set +CONFIG_ECHO=y +# CONFIG_FEATURE_FANCY_ECHO is not set +# CONFIG_ENV is not set +# CONFIG_FEATURE_ENV_LONG_OPTIONS is not set +# CONFIG_EXPAND is not set +# CONFIG_FEATURE_EXPAND_LONG_OPTIONS is not set +# CONFIG_EXPR is not set +# CONFIG_EXPR_MATH_SUPPORT_64 is not set +# CONFIG_FALSE is not set +# CONFIG_FOLD is not set +# CONFIG_FSYNC is not set +# CONFIG_HEAD is not set +# CONFIG_FEATURE_FANCY_HEAD is not set +# CONFIG_INSTALL is not set +# CONFIG_FEATURE_INSTALL_LONG_OPTIONS is not set +# CONFIG_LN is not set +# CONFIG_LOGNAME is not set +# CONFIG_LS is not set +# CONFIG_FEATURE_LS_FILETYPES is not set +# CONFIG_FEATURE_LS_FOLLOWLINKS is not set +# CONFIG_FEATURE_LS_RECURSIVE is not set +# CONFIG_FEATURE_LS_SORTFILES is not set +# CONFIG_FEATURE_LS_TIMESTAMPS is not set +# CONFIG_FEATURE_LS_USERNAME is not set +# CONFIG_FEATURE_LS_COLOR is not set +# CONFIG_FEATURE_LS_COLOR_IS_DEFAULT is not set +# CONFIG_MD5SUM is not set +# CONFIG_MKDIR is not set +# CONFIG_FEATURE_MKDIR_LONG_OPTIONS is not set +# CONFIG_MKFIFO is not set +# CONFIG_MKNOD is not set +# CONFIG_MV is not set +# CONFIG_FEATURE_MV_LONG_OPTIONS is not set +# CONFIG_NICE is not set +# CONFIG_NOHUP is not set +# CONFIG_OD is not set +# CONFIG_PRINTENV is not set +# CONFIG_PRINTF is not set +# CONFIG_PWD is not set +# CONFIG_READLINK is not 
set +# CONFIG_FEATURE_READLINK_FOLLOW is not set +# CONFIG_REALPATH is not set +# CONFIG_RM is not set +# CONFIG_RMDIR is not set +# CONFIG_FEATURE_RMDIR_LONG_OPTIONS is not set +# CONFIG_SEQ is not set +# CONFIG_SHA1SUM is not set +# CONFIG_SHA256SUM is not set +# CONFIG_SHA512SUM is not set +# CONFIG_SHA3SUM is not set +# CONFIG_SLEEP is not set +# CONFIG_FEATURE_FANCY_SLEEP is not set +# CONFIG_FEATURE_FLOAT_SLEEP is not set +# CONFIG_SORT is not set +# CONFIG_FEATURE_SORT_BIG is not set +# CONFIG_SPLIT is not set +# CONFIG_FEATURE_SPLIT_FANCY is not set +# CONFIG_STAT is not set +# CONFIG_FEATURE_STAT_FORMAT is not set +# CONFIG_STTY is not set +# CONFIG_SUM is not set +# CONFIG_SYNC is not set +# CONFIG_TAC is not set +# CONFIG_TAIL is not set +# CONFIG_FEATURE_FANCY_TAIL is not set +# CONFIG_TEE is not set +# CONFIG_FEATURE_TEE_USE_BLOCK_IO is not set +# CONFIG_TRUE is not set +# CONFIG_TTY is not set +# CONFIG_UNAME is not set +# CONFIG_UNEXPAND is not set +# CONFIG_FEATURE_UNEXPAND_LONG_OPTIONS is not set +# CONFIG_UNIQ is not set +# CONFIG_USLEEP is not set +# CONFIG_UUDECODE is not set +# CONFIG_UUENCODE is not set +# CONFIG_WC is not set +# CONFIG_FEATURE_WC_LARGE is not set +# CONFIG_WHOAMI is not set +# CONFIG_YES is not set +# CONFIG_FEATURE_PRESERVE_HARDLINKS is not set +# CONFIG_FEATURE_AUTOWIDTH is not set +# CONFIG_FEATURE_HUMAN_READABLE is not set +# CONFIG_FEATURE_MD5_SHA1_SUM_CHECK is not set + +# +# Console Utilities +# +# CONFIG_CHVT is not set +# CONFIG_FGCONSOLE is not set +# CONFIG_CLEAR is not set +# CONFIG_DEALLOCVT is not set +# CONFIG_DUMPKMAP is not set +# CONFIG_KBD_MODE is not set +# CONFIG_LOADFONT is not set +# CONFIG_LOADKMAP is not set +# CONFIG_OPENVT is not set +# CONFIG_RESET is not set +# CONFIG_RESIZE is not set +# CONFIG_FEATURE_RESIZE_PRINT is not set +# CONFIG_SETCONSOLE is not set +# CONFIG_FEATURE_SETCONSOLE_LONG_OPTIONS is not set +# CONFIG_SETFONT is not set +# CONFIG_FEATURE_SETFONT_TEXTUAL_MAP is not set +CONFIG_DEFAULT_SETFONT_DIR="" +# CONFIG_SETKEYCODES is not set +# CONFIG_SETLOGCONS is not set +# CONFIG_SHOWKEY is not set +# CONFIG_FEATURE_LOADFONT_PSF2 is not set +# CONFIG_FEATURE_LOADFONT_RAW is not set + +# +# Debian Utilities +# +# CONFIG_MKTEMP is not set +# CONFIG_PIPE_PROGRESS is not set +# CONFIG_RUN_PARTS is not set +# CONFIG_FEATURE_RUN_PARTS_LONG_OPTIONS is not set +# CONFIG_FEATURE_RUN_PARTS_FANCY is not set +# CONFIG_START_STOP_DAEMON is not set +# CONFIG_FEATURE_START_STOP_DAEMON_FANCY is not set +# CONFIG_FEATURE_START_STOP_DAEMON_LONG_OPTIONS is not set +# CONFIG_WHICH is not set + +# +# Editors +# +# CONFIG_AWK is not set +# CONFIG_FEATURE_AWK_LIBM is not set +# CONFIG_FEATURE_AWK_GNU_EXTENSIONS is not set +# CONFIG_CMP is not set +# CONFIG_DIFF is not set +# CONFIG_FEATURE_DIFF_LONG_OPTIONS is not set +# CONFIG_FEATURE_DIFF_DIR is not set +# CONFIG_ED is not set +# CONFIG_PATCH is not set +# CONFIG_SED is not set +# CONFIG_VI is not set +CONFIG_FEATURE_VI_MAX_LEN=0 +# CONFIG_FEATURE_VI_8BIT is not set +# CONFIG_FEATURE_VI_COLON is not set +# CONFIG_FEATURE_VI_YANKMARK is not set +# CONFIG_FEATURE_VI_SEARCH is not set +# CONFIG_FEATURE_VI_REGEX_SEARCH is not set +# CONFIG_FEATURE_VI_USE_SIGNALS is not set +# CONFIG_FEATURE_VI_DOT_CMD is not set +# CONFIG_FEATURE_VI_READONLY is not set +# CONFIG_FEATURE_VI_SETOPTS is not set +# CONFIG_FEATURE_VI_SET is not set +# CONFIG_FEATURE_VI_WIN_RESIZE is not set +# CONFIG_FEATURE_VI_ASK_TERMINAL is not set +# CONFIG_FEATURE_ALLOW_EXEC is not set + +# +# Finding Utilities +# +# 
CONFIG_FIND is not set +# CONFIG_FEATURE_FIND_PRINT0 is not set +# CONFIG_FEATURE_FIND_MTIME is not set +# CONFIG_FEATURE_FIND_MMIN is not set +# CONFIG_FEATURE_FIND_PERM is not set +# CONFIG_FEATURE_FIND_TYPE is not set +# CONFIG_FEATURE_FIND_XDEV is not set +# CONFIG_FEATURE_FIND_MAXDEPTH is not set +# CONFIG_FEATURE_FIND_NEWER is not set +# CONFIG_FEATURE_FIND_INUM is not set +# CONFIG_FEATURE_FIND_EXEC is not set +# CONFIG_FEATURE_FIND_USER is not set +# CONFIG_FEATURE_FIND_GROUP is not set +# CONFIG_FEATURE_FIND_NOT is not set +# CONFIG_FEATURE_FIND_DEPTH is not set +# CONFIG_FEATURE_FIND_PAREN is not set +# CONFIG_FEATURE_FIND_SIZE is not set +# CONFIG_FEATURE_FIND_PRUNE is not set +# CONFIG_FEATURE_FIND_DELETE is not set +# CONFIG_FEATURE_FIND_PATH is not set +# CONFIG_FEATURE_FIND_REGEX is not set +# CONFIG_FEATURE_FIND_CONTEXT is not set +# CONFIG_FEATURE_FIND_LINKS is not set +# CONFIG_GREP is not set +# CONFIG_FEATURE_GREP_EGREP_ALIAS is not set +# CONFIG_FEATURE_GREP_FGREP_ALIAS is not set +# CONFIG_FEATURE_GREP_CONTEXT is not set +# CONFIG_XARGS is not set +# CONFIG_FEATURE_XARGS_SUPPORT_CONFIRMATION is not set +# CONFIG_FEATURE_XARGS_SUPPORT_QUOTES is not set +# CONFIG_FEATURE_XARGS_SUPPORT_TERMOPT is not set +# CONFIG_FEATURE_XARGS_SUPPORT_ZERO_TERM is not set + +# +# Init Utilities +# +# CONFIG_BOOTCHARTD is not set +# CONFIG_FEATURE_BOOTCHARTD_BLOATED_HEADER is not set +# CONFIG_FEATURE_BOOTCHARTD_CONFIG_FILE is not set +# CONFIG_HALT is not set +# CONFIG_FEATURE_CALL_TELINIT is not set +CONFIG_TELINIT_PATH="" +# CONFIG_INIT is not set +# CONFIG_FEATURE_USE_INITTAB is not set +# CONFIG_FEATURE_KILL_REMOVED is not set +CONFIG_FEATURE_KILL_DELAY=0 +# CONFIG_FEATURE_INIT_SCTTY is not set +# CONFIG_FEATURE_INIT_SYSLOG is not set +# CONFIG_FEATURE_EXTRA_QUIET is not set +# CONFIG_FEATURE_INIT_COREDUMPS is not set +# CONFIG_FEATURE_INITRD is not set +CONFIG_INIT_TERMINAL_TYPE="" +# CONFIG_MESG is not set +# CONFIG_FEATURE_MESG_ENABLE_ONLY_GROUP is not set + +# +# Login/Password Management Utilities +# +# CONFIG_ADD_SHELL is not set +# CONFIG_REMOVE_SHELL is not set +# CONFIG_FEATURE_SHADOWPASSWDS is not set +# CONFIG_USE_BB_PWD_GRP is not set +# CONFIG_USE_BB_SHADOW is not set +# CONFIG_USE_BB_CRYPT is not set +# CONFIG_USE_BB_CRYPT_SHA is not set +# CONFIG_ADDUSER is not set +# CONFIG_FEATURE_ADDUSER_LONG_OPTIONS is not set +# CONFIG_FEATURE_CHECK_NAMES is not set +CONFIG_FIRST_SYSTEM_ID=0 +CONFIG_LAST_SYSTEM_ID=0 +# CONFIG_ADDGROUP is not set +# CONFIG_FEATURE_ADDGROUP_LONG_OPTIONS is not set +# CONFIG_FEATURE_ADDUSER_TO_GROUP is not set +# CONFIG_DELUSER is not set +# CONFIG_DELGROUP is not set +# CONFIG_FEATURE_DEL_USER_FROM_GROUP is not set +# CONFIG_GETTY is not set +# CONFIG_LOGIN is not set +# CONFIG_LOGIN_SESSION_AS_CHILD is not set +# CONFIG_PAM is not set +# CONFIG_LOGIN_SCRIPTS is not set +# CONFIG_FEATURE_NOLOGIN is not set +# CONFIG_FEATURE_SECURETTY is not set +# CONFIG_PASSWD is not set +# CONFIG_FEATURE_PASSWD_WEAK_CHECK is not set +# CONFIG_CRYPTPW is not set +# CONFIG_CHPASSWD is not set +CONFIG_FEATURE_DEFAULT_PASSWD_ALGO="" +# CONFIG_SU is not set +# CONFIG_FEATURE_SU_SYSLOG is not set +# CONFIG_FEATURE_SU_CHECKS_SHELLS is not set +# CONFIG_SULOGIN is not set +# CONFIG_VLOCK is not set + +# +# Linux Ext2 FS Progs +# +# CONFIG_CHATTR is not set +# CONFIG_FSCK is not set +# CONFIG_LSATTR is not set +# CONFIG_TUNE2FS is not set + +# +# Linux Module Utilities +# +# CONFIG_MODINFO is not set +# CONFIG_MODPROBE_SMALL is not set +# 
CONFIG_FEATURE_MODPROBE_SMALL_OPTIONS_ON_CMDLINE is not set +# CONFIG_FEATURE_MODPROBE_SMALL_CHECK_ALREADY_LOADED is not set +CONFIG_INSMOD=y +CONFIG_RMMOD=y +CONFIG_LSMOD=y +CONFIG_FEATURE_LSMOD_PRETTY_2_6_OUTPUT=y +CONFIG_MODPROBE=y +# CONFIG_FEATURE_MODPROBE_BLACKLIST is not set +# CONFIG_DEPMOD is not set + +# +# Options common to multiple modutils +# +# CONFIG_FEATURE_2_4_MODULES is not set +# CONFIG_FEATURE_INSMOD_TRY_MMAP is not set +# CONFIG_FEATURE_INSMOD_VERSION_CHECKING is not set +# CONFIG_FEATURE_INSMOD_KSYMOOPS_SYMBOLS is not set +# CONFIG_FEATURE_INSMOD_LOADINKMEM is not set +# CONFIG_FEATURE_INSMOD_LOAD_MAP is not set +# CONFIG_FEATURE_INSMOD_LOAD_MAP_FULL is not set +CONFIG_FEATURE_CHECK_TAINTED_MODULE=y +CONFIG_FEATURE_MODUTILS_ALIAS=y +CONFIG_FEATURE_MODUTILS_SYMBOLS=y +CONFIG_DEFAULT_MODULES_DIR="/lib/modules" +CONFIG_DEFAULT_DEPMOD_FILE="modules.dep" + +# +# Linux System Utilities +# +# CONFIG_BLOCKDEV is not set +# CONFIG_FSTRIM is not set +# CONFIG_MDEV is not set +# CONFIG_FEATURE_MDEV_CONF is not set +# CONFIG_FEATURE_MDEV_RENAME is not set +# CONFIG_FEATURE_MDEV_RENAME_REGEXP is not set +# CONFIG_FEATURE_MDEV_EXEC is not set +# CONFIG_FEATURE_MDEV_LOAD_FIRMWARE is not set +# CONFIG_REV is not set +# CONFIG_ACPID is not set +# CONFIG_FEATURE_ACPID_COMPAT is not set +# CONFIG_BLKID is not set +# CONFIG_FEATURE_BLKID_TYPE is not set +# CONFIG_DMESG is not set +# CONFIG_FEATURE_DMESG_PRETTY is not set +# CONFIG_FBSET is not set +# CONFIG_FEATURE_FBSET_FANCY is not set +# CONFIG_FEATURE_FBSET_READMODE is not set +# CONFIG_FDFLUSH is not set +# CONFIG_FDFORMAT is not set +# CONFIG_FDISK is not set +# CONFIG_FDISK_SUPPORT_LARGE_DISKS is not set +# CONFIG_FEATURE_FDISK_WRITABLE is not set +# CONFIG_FEATURE_AIX_LABEL is not set +# CONFIG_FEATURE_SGI_LABEL is not set +# CONFIG_FEATURE_SUN_LABEL is not set +# CONFIG_FEATURE_OSF_LABEL is not set +# CONFIG_FEATURE_GPT_LABEL is not set +# CONFIG_FEATURE_FDISK_ADVANCED is not set +# CONFIG_FINDFS is not set +# CONFIG_FLOCK is not set +# CONFIG_FREERAMDISK is not set +# CONFIG_FSCK_MINIX is not set +# CONFIG_MKFS_EXT2 is not set +# CONFIG_MKFS_MINIX is not set +# CONFIG_FEATURE_MINIX2 is not set +# CONFIG_MKFS_REISER is not set +# CONFIG_MKFS_VFAT is not set +# CONFIG_GETOPT is not set +# CONFIG_FEATURE_GETOPT_LONG is not set +# CONFIG_HEXDUMP is not set +# CONFIG_FEATURE_HEXDUMP_REVERSE is not set +# CONFIG_HD is not set +# CONFIG_HWCLOCK is not set +# CONFIG_FEATURE_HWCLOCK_LONG_OPTIONS is not set +# CONFIG_FEATURE_HWCLOCK_ADJTIME_FHS is not set +# CONFIG_IPCRM is not set +# CONFIG_IPCS is not set +# CONFIG_LOSETUP is not set +# CONFIG_LSPCI is not set +# CONFIG_LSUSB is not set +# CONFIG_MKSWAP is not set +# CONFIG_FEATURE_MKSWAP_UUID is not set +# CONFIG_MORE is not set +# CONFIG_MOUNT is not set +# CONFIG_FEATURE_MOUNT_FAKE is not set +# CONFIG_FEATURE_MOUNT_VERBOSE is not set +# CONFIG_FEATURE_MOUNT_HELPERS is not set +# CONFIG_FEATURE_MOUNT_LABEL is not set +# CONFIG_FEATURE_MOUNT_NFS is not set +# CONFIG_FEATURE_MOUNT_CIFS is not set +# CONFIG_FEATURE_MOUNT_FLAGS is not set +# CONFIG_FEATURE_MOUNT_FSTAB is not set +# CONFIG_PIVOT_ROOT is not set +# CONFIG_RDATE is not set +# CONFIG_RDEV is not set +# CONFIG_READPROFILE is not set +# CONFIG_RTCWAKE is not set +# CONFIG_SCRIPT is not set +# CONFIG_SCRIPTREPLAY is not set +# CONFIG_SETARCH is not set +# CONFIG_SWAPONOFF is not set +# CONFIG_FEATURE_SWAPON_PRI is not set +# CONFIG_SWITCH_ROOT is not set +# CONFIG_UMOUNT is not set +# CONFIG_FEATURE_UMOUNT_ALL is not set +# 
CONFIG_FEATURE_MOUNT_LOOP is not set +# CONFIG_FEATURE_MOUNT_LOOP_CREATE is not set +# CONFIG_FEATURE_MTAB_SUPPORT is not set +# CONFIG_VOLUMEID is not set +# CONFIG_FEATURE_VOLUMEID_BTRFS is not set +# CONFIG_FEATURE_VOLUMEID_CRAMFS is not set +# CONFIG_FEATURE_VOLUMEID_EXFAT is not set +# CONFIG_FEATURE_VOLUMEID_EXT is not set +# CONFIG_FEATURE_VOLUMEID_F2FS is not set +# CONFIG_FEATURE_VOLUMEID_FAT is not set +# CONFIG_FEATURE_VOLUMEID_HFS is not set +# CONFIG_FEATURE_VOLUMEID_ISO9660 is not set +# CONFIG_FEATURE_VOLUMEID_JFS is not set +# CONFIG_FEATURE_VOLUMEID_LINUXRAID is not set +# CONFIG_FEATURE_VOLUMEID_LINUXSWAP is not set +# CONFIG_FEATURE_VOLUMEID_LUKS is not set +# CONFIG_FEATURE_VOLUMEID_NILFS is not set +# CONFIG_FEATURE_VOLUMEID_NTFS is not set +# CONFIG_FEATURE_VOLUMEID_OCFS2 is not set +# CONFIG_FEATURE_VOLUMEID_REISERFS is not set +# CONFIG_FEATURE_VOLUMEID_ROMFS is not set +# CONFIG_FEATURE_VOLUMEID_SQUASHFS is not set +# CONFIG_FEATURE_VOLUMEID_SYSV is not set +# CONFIG_FEATURE_VOLUMEID_UDF is not set +# CONFIG_FEATURE_VOLUMEID_XFS is not set + +# +# Miscellaneous Utilities +# +# CONFIG_CONSPY is not set +# CONFIG_LESS is not set +CONFIG_FEATURE_LESS_MAXLINES=0 +# CONFIG_FEATURE_LESS_BRACKETS is not set +# CONFIG_FEATURE_LESS_FLAGS is not set +# CONFIG_FEATURE_LESS_MARKS is not set +# CONFIG_FEATURE_LESS_REGEXP is not set +# CONFIG_FEATURE_LESS_WINCH is not set +# CONFIG_FEATURE_LESS_ASK_TERMINAL is not set +# CONFIG_FEATURE_LESS_DASHCMD is not set +# CONFIG_FEATURE_LESS_LINENUMS is not set +# CONFIG_NANDWRITE is not set +# CONFIG_NANDDUMP is not set +# CONFIG_RFKILL is not set +# CONFIG_SETSERIAL is not set +# CONFIG_UBIATTACH is not set +# CONFIG_UBIDETACH is not set +# CONFIG_UBIMKVOL is not set +# CONFIG_UBIRMVOL is not set +# CONFIG_UBIRSVOL is not set +# CONFIG_UBIUPDATEVOL is not set +# CONFIG_WALL is not set +# CONFIG_ADJTIMEX is not set +# CONFIG_BBCONFIG is not set +# CONFIG_FEATURE_COMPRESS_BBCONFIG is not set +# CONFIG_BEEP is not set +CONFIG_FEATURE_BEEP_FREQ=0 +CONFIG_FEATURE_BEEP_LENGTH_MS=0 +# CONFIG_CHAT is not set +# CONFIG_FEATURE_CHAT_NOFAIL is not set +# CONFIG_FEATURE_CHAT_TTY_HIFI is not set +# CONFIG_FEATURE_CHAT_IMPLICIT_CR is not set +# CONFIG_FEATURE_CHAT_SWALLOW_OPTS is not set +# CONFIG_FEATURE_CHAT_SEND_ESCAPES is not set +# CONFIG_FEATURE_CHAT_VAR_ABORT_LEN is not set +# CONFIG_FEATURE_CHAT_CLR_ABORT is not set +# CONFIG_CHRT is not set +# CONFIG_CROND is not set +# CONFIG_FEATURE_CROND_D is not set +# CONFIG_FEATURE_CROND_CALL_SENDMAIL is not set +CONFIG_FEATURE_CROND_DIR="" +# CONFIG_CRONTAB is not set +# CONFIG_DC is not set +# CONFIG_FEATURE_DC_LIBM is not set +# CONFIG_DEVFSD is not set +# CONFIG_DEVFSD_MODLOAD is not set +# CONFIG_DEVFSD_FG_NP is not set +# CONFIG_DEVFSD_VERBOSE is not set +# CONFIG_FEATURE_DEVFS is not set +# CONFIG_DEVMEM is not set +# CONFIG_EJECT is not set +# CONFIG_FEATURE_EJECT_SCSI is not set +# CONFIG_FBSPLASH is not set +# CONFIG_FLASHCP is not set +# CONFIG_FLASH_LOCK is not set +# CONFIG_FLASH_UNLOCK is not set +# CONFIG_FLASH_ERASEALL is not set +# CONFIG_IONICE is not set +# CONFIG_INOTIFYD is not set +# CONFIG_LAST is not set +# CONFIG_FEATURE_LAST_SMALL is not set +# CONFIG_FEATURE_LAST_FANCY is not set +# CONFIG_HDPARM is not set +# CONFIG_FEATURE_HDPARM_GET_IDENTITY is not set +# CONFIG_FEATURE_HDPARM_HDIO_SCAN_HWIF is not set +# CONFIG_FEATURE_HDPARM_HDIO_UNREGISTER_HWIF is not set +# CONFIG_FEATURE_HDPARM_HDIO_DRIVE_RESET is not set +# CONFIG_FEATURE_HDPARM_HDIO_TRISTATE_HWIF is not set +# 
CONFIG_FEATURE_HDPARM_HDIO_GETSET_DMA is not set +# CONFIG_MAKEDEVS is not set +# CONFIG_FEATURE_MAKEDEVS_LEAF is not set +# CONFIG_FEATURE_MAKEDEVS_TABLE is not set +# CONFIG_MAN is not set +# CONFIG_MICROCOM is not set +# CONFIG_MOUNTPOINT is not set +# CONFIG_MT is not set +# CONFIG_RAIDAUTORUN is not set +# CONFIG_READAHEAD is not set +# CONFIG_RUNLEVEL is not set +# CONFIG_RX is not set +# CONFIG_SETSID is not set +# CONFIG_STRINGS is not set +# CONFIG_TASKSET is not set +# CONFIG_FEATURE_TASKSET_FANCY is not set +# CONFIG_TIME is not set +# CONFIG_TIMEOUT is not set +# CONFIG_TTYSIZE is not set +# CONFIG_VOLNAME is not set +# CONFIG_WATCHDOG is not set + +# +# Networking Utilities +# +# CONFIG_NAMEIF is not set +# CONFIG_FEATURE_NAMEIF_EXTENDED is not set +# CONFIG_NBDCLIENT is not set +# CONFIG_NC is not set +# CONFIG_NC_SERVER is not set +# CONFIG_NC_EXTRA is not set +# CONFIG_NC_110_COMPAT is not set +# CONFIG_PING is not set +# CONFIG_PING6 is not set +# CONFIG_FEATURE_FANCY_PING is not set +# CONFIG_WHOIS is not set +# CONFIG_FEATURE_IPV6 is not set +# CONFIG_FEATURE_UNIX_LOCAL is not set +# CONFIG_FEATURE_PREFER_IPV4_ADDRESS is not set +# CONFIG_VERBOSE_RESOLUTION_ERRORS is not set +# CONFIG_ARP is not set +# CONFIG_ARPING is not set +# CONFIG_BRCTL is not set +# CONFIG_FEATURE_BRCTL_FANCY is not set +# CONFIG_FEATURE_BRCTL_SHOW is not set +# CONFIG_DNSD is not set +# CONFIG_ETHER_WAKE is not set +# CONFIG_FAKEIDENTD is not set +# CONFIG_FTPD is not set +# CONFIG_FEATURE_FTP_WRITE is not set +# CONFIG_FEATURE_FTPD_ACCEPT_BROKEN_LIST is not set +# CONFIG_FTPGET is not set +# CONFIG_FTPPUT is not set +# CONFIG_FEATURE_FTPGETPUT_LONG_OPTIONS is not set +# CONFIG_HOSTNAME is not set +# CONFIG_HTTPD is not set +# CONFIG_FEATURE_HTTPD_RANGES is not set +# CONFIG_FEATURE_HTTPD_USE_SENDFILE is not set +# CONFIG_FEATURE_HTTPD_SETUID is not set +# CONFIG_FEATURE_HTTPD_BASIC_AUTH is not set +# CONFIG_FEATURE_HTTPD_AUTH_MD5 is not set +# CONFIG_FEATURE_HTTPD_CGI is not set +# CONFIG_FEATURE_HTTPD_CONFIG_WITH_SCRIPT_INTERPR is not set +# CONFIG_FEATURE_HTTPD_SET_REMOTE_PORT_TO_ENV is not set +# CONFIG_FEATURE_HTTPD_ENCODE_URL_STR is not set +# CONFIG_FEATURE_HTTPD_ERROR_PAGES is not set +# CONFIG_FEATURE_HTTPD_PROXY is not set +# CONFIG_FEATURE_HTTPD_GZIP is not set +# CONFIG_IFCONFIG is not set +# CONFIG_FEATURE_IFCONFIG_STATUS is not set +# CONFIG_FEATURE_IFCONFIG_SLIP is not set +# CONFIG_FEATURE_IFCONFIG_MEMSTART_IOADDR_IRQ is not set +# CONFIG_FEATURE_IFCONFIG_HW is not set +# CONFIG_FEATURE_IFCONFIG_BROADCAST_PLUS is not set +# CONFIG_IFENSLAVE is not set +# CONFIG_IFPLUGD is not set +# CONFIG_IFUPDOWN is not set +CONFIG_IFUPDOWN_IFSTATE_PATH="" +# CONFIG_FEATURE_IFUPDOWN_IP is not set +# CONFIG_FEATURE_IFUPDOWN_IP_BUILTIN is not set +# CONFIG_FEATURE_IFUPDOWN_IFCONFIG_BUILTIN is not set +# CONFIG_FEATURE_IFUPDOWN_IPV4 is not set +# CONFIG_FEATURE_IFUPDOWN_IPV6 is not set +# CONFIG_FEATURE_IFUPDOWN_MAPPING is not set +# CONFIG_FEATURE_IFUPDOWN_EXTERNAL_DHCP is not set +# CONFIG_INETD is not set +# CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_ECHO is not set +# CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_DISCARD is not set +# CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_TIME is not set +# CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_DAYTIME is not set +# CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_CHARGEN is not set +# CONFIG_FEATURE_INETD_RPC is not set +# CONFIG_IP is not set +# CONFIG_FEATURE_IP_ADDRESS is not set +# CONFIG_FEATURE_IP_LINK is not set +# CONFIG_FEATURE_IP_ROUTE is not set +# CONFIG_FEATURE_IP_TUNNEL is 
not set +# CONFIG_FEATURE_IP_RULE is not set +# CONFIG_FEATURE_IP_SHORT_FORMS is not set +# CONFIG_FEATURE_IP_RARE_PROTOCOLS is not set +# CONFIG_IPADDR is not set +# CONFIG_IPLINK is not set +# CONFIG_IPROUTE is not set +# CONFIG_IPTUNNEL is not set +# CONFIG_IPRULE is not set +# CONFIG_IPCALC is not set +# CONFIG_FEATURE_IPCALC_FANCY is not set +# CONFIG_FEATURE_IPCALC_LONG_OPTIONS is not set +# CONFIG_NETSTAT is not set +# CONFIG_FEATURE_NETSTAT_WIDE is not set +# CONFIG_FEATURE_NETSTAT_PRG is not set +# CONFIG_NSLOOKUP is not set +# CONFIG_NTPD is not set +# CONFIG_FEATURE_NTPD_SERVER is not set +# CONFIG_PSCAN is not set +# CONFIG_ROUTE is not set +# CONFIG_SLATTACH is not set +# CONFIG_TCPSVD is not set +# CONFIG_TELNET is not set +# CONFIG_FEATURE_TELNET_TTYPE is not set +# CONFIG_FEATURE_TELNET_AUTOLOGIN is not set +# CONFIG_TELNETD is not set +# CONFIG_FEATURE_TELNETD_STANDALONE is not set +# CONFIG_FEATURE_TELNETD_INETD_WAIT is not set +# CONFIG_TFTP is not set +# CONFIG_TFTPD is not set +# CONFIG_FEATURE_TFTP_GET is not set +# CONFIG_FEATURE_TFTP_PUT is not set +# CONFIG_FEATURE_TFTP_BLOCKSIZE is not set +# CONFIG_FEATURE_TFTP_PROGRESS_BAR is not set +# CONFIG_TFTP_DEBUG is not set +# CONFIG_TRACEROUTE is not set +# CONFIG_TRACEROUTE6 is not set +# CONFIG_FEATURE_TRACEROUTE_VERBOSE is not set +# CONFIG_FEATURE_TRACEROUTE_SOURCE_ROUTE is not set +# CONFIG_FEATURE_TRACEROUTE_USE_ICMP is not set +# CONFIG_TUNCTL is not set +# CONFIG_FEATURE_TUNCTL_UG is not set +# CONFIG_UDHCPC6 is not set +# CONFIG_UDHCPD is not set +# CONFIG_DHCPRELAY is not set +# CONFIG_DUMPLEASES is not set +# CONFIG_FEATURE_UDHCPD_WRITE_LEASES_EARLY is not set +# CONFIG_FEATURE_UDHCPD_BASE_IP_ON_MAC is not set +CONFIG_DHCPD_LEASES_FILE="" +# CONFIG_UDHCPC is not set +# CONFIG_FEATURE_UDHCPC_ARPING is not set +# CONFIG_FEATURE_UDHCP_PORT is not set +CONFIG_UDHCP_DEBUG=0 +# CONFIG_FEATURE_UDHCP_RFC3397 is not set +# CONFIG_FEATURE_UDHCP_8021Q is not set +CONFIG_UDHCPC_DEFAULT_SCRIPT="" +CONFIG_UDHCPC_SLACK_FOR_BUGGY_SERVERS=0 +CONFIG_IFUPDOWN_UDHCPC_CMD_OPTIONS="" +# CONFIG_UDPSVD is not set +# CONFIG_VCONFIG is not set +# CONFIG_WGET is not set +# CONFIG_FEATURE_WGET_STATUSBAR is not set +# CONFIG_FEATURE_WGET_AUTHENTICATION is not set +# CONFIG_FEATURE_WGET_LONG_OPTIONS is not set +# CONFIG_FEATURE_WGET_TIMEOUT is not set +# CONFIG_ZCIP is not set + +# +# Print Utilities +# +# CONFIG_LPD is not set +# CONFIG_LPR is not set +# CONFIG_LPQ is not set + +# +# Mail Utilities +# +# CONFIG_MAKEMIME is not set +CONFIG_FEATURE_MIME_CHARSET="" +# CONFIG_POPMAILDIR is not set +# CONFIG_FEATURE_POPMAILDIR_DELIVERY is not set +# CONFIG_REFORMIME is not set +# CONFIG_FEATURE_REFORMIME_COMPAT is not set +# CONFIG_SENDMAIL is not set + +# +# Process Utilities +# +# CONFIG_IOSTAT is not set +# CONFIG_LSOF is not set +# CONFIG_MPSTAT is not set +# CONFIG_NMETER is not set +# CONFIG_PMAP is not set +# CONFIG_POWERTOP is not set +# CONFIG_PSTREE is not set +# CONFIG_PWDX is not set +# CONFIG_SMEMCAP is not set +# CONFIG_TOP is not set +# CONFIG_FEATURE_TOP_CPU_USAGE_PERCENTAGE is not set +# CONFIG_FEATURE_TOP_CPU_GLOBAL_PERCENTS is not set +# CONFIG_FEATURE_TOP_SMP_CPU is not set +# CONFIG_FEATURE_TOP_DECIMALS is not set +# CONFIG_FEATURE_TOP_SMP_PROCESS is not set +# CONFIG_FEATURE_TOPMEM is not set +# CONFIG_UPTIME is not set +# CONFIG_FEATURE_UPTIME_UTMP_SUPPORT is not set +# CONFIG_FREE is not set +# CONFIG_FUSER is not set +# CONFIG_KILL is not set +# CONFIG_KILLALL is not set +# CONFIG_KILLALL5 is not set +# CONFIG_PGREP 
is not set +# CONFIG_PIDOF is not set +# CONFIG_FEATURE_PIDOF_SINGLE is not set +# CONFIG_FEATURE_PIDOF_OMIT is not set +# CONFIG_PKILL is not set +CONFIG_PS=y +# CONFIG_FEATURE_PS_WIDE is not set +# CONFIG_FEATURE_PS_LONG is not set +# CONFIG_FEATURE_PS_TIME is not set +# CONFIG_FEATURE_PS_ADDITIONAL_COLUMNS is not set +# CONFIG_FEATURE_PS_UNUSUAL_SYSTEMS is not set +# CONFIG_RENICE is not set +# CONFIG_BB_SYSCTL is not set +# CONFIG_FEATURE_SHOW_THREADS is not set +# CONFIG_WATCH is not set + +# +# Runit Utilities +# +# CONFIG_RUNSV is not set +# CONFIG_RUNSVDIR is not set +# CONFIG_FEATURE_RUNSVDIR_LOG is not set +# CONFIG_SV is not set +CONFIG_SV_DEFAULT_SERVICE_DIR="" +# CONFIG_SVLOGD is not set +# CONFIG_CHPST is not set +# CONFIG_SETUIDGID is not set +# CONFIG_ENVUIDGID is not set +# CONFIG_ENVDIR is not set +# CONFIG_SOFTLIMIT is not set +# CONFIG_CHCON is not set +# CONFIG_FEATURE_CHCON_LONG_OPTIONS is not set +# CONFIG_GETENFORCE is not set +# CONFIG_GETSEBOOL is not set +# CONFIG_LOAD_POLICY is not set +# CONFIG_MATCHPATHCON is not set +# CONFIG_RESTORECON is not set +# CONFIG_RUNCON is not set +# CONFIG_FEATURE_RUNCON_LONG_OPTIONS is not set +# CONFIG_SELINUXENABLED is not set +# CONFIG_SETENFORCE is not set +# CONFIG_SETFILES is not set +# CONFIG_FEATURE_SETFILES_CHECK_OPTION is not set +# CONFIG_SETSEBOOL is not set +# CONFIG_SESTATUS is not set + +# +# Shells +# +# CONFIG_ASH is not set +# CONFIG_ASH_BASH_COMPAT is not set +# CONFIG_ASH_IDLE_TIMEOUT is not set +# CONFIG_ASH_JOB_CONTROL is not set +# CONFIG_ASH_ALIAS is not set +# CONFIG_ASH_GETOPTS is not set +# CONFIG_ASH_BUILTIN_ECHO is not set +# CONFIG_ASH_BUILTIN_PRINTF is not set +# CONFIG_ASH_BUILTIN_TEST is not set +# CONFIG_ASH_CMDCMD is not set +# CONFIG_ASH_MAIL is not set +# CONFIG_ASH_OPTIMIZE_FOR_SIZE is not set +# CONFIG_ASH_RANDOM_SUPPORT is not set +# CONFIG_ASH_EXPAND_PRMT is not set +# CONFIG_CTTYHACK is not set +# CONFIG_HUSH is not set +# CONFIG_HUSH_BASH_COMPAT is not set +# CONFIG_HUSH_BRACE_EXPANSION is not set +# CONFIG_HUSH_HELP is not set +# CONFIG_HUSH_INTERACTIVE is not set +# CONFIG_HUSH_SAVEHISTORY is not set +# CONFIG_HUSH_JOB is not set +# CONFIG_HUSH_TICK is not set +# CONFIG_HUSH_IF is not set +# CONFIG_HUSH_LOOPS is not set +# CONFIG_HUSH_CASE is not set +# CONFIG_HUSH_FUNCTIONS is not set +# CONFIG_HUSH_LOCAL is not set +# CONFIG_HUSH_RANDOM_SUPPORT is not set +# CONFIG_HUSH_EXPORT_N is not set +# CONFIG_HUSH_MODE_X is not set +# CONFIG_MSH is not set +# CONFIG_FEATURE_SH_IS_ASH is not set +# CONFIG_FEATURE_SH_IS_HUSH is not set +CONFIG_FEATURE_SH_IS_NONE=y +# CONFIG_FEATURE_BASH_IS_ASH is not set +# CONFIG_FEATURE_BASH_IS_HUSH is not set +CONFIG_FEATURE_BASH_IS_NONE=y +# CONFIG_SH_MATH_SUPPORT is not set +# CONFIG_SH_MATH_SUPPORT_64 is not set +# CONFIG_FEATURE_SH_EXTRA_QUIET is not set +# CONFIG_FEATURE_SH_STANDALONE is not set +# CONFIG_FEATURE_SH_NOFORK is not set +# CONFIG_FEATURE_SH_HISTFILESIZE is not set + +# +# System Logging Utilities +# +# CONFIG_SYSLOGD is not set +# CONFIG_FEATURE_ROTATE_LOGFILE is not set +# CONFIG_FEATURE_REMOTE_LOG is not set +# CONFIG_FEATURE_SYSLOGD_DUP is not set +# CONFIG_FEATURE_SYSLOGD_CFG is not set +CONFIG_FEATURE_SYSLOGD_READ_BUFFER_SIZE=0 +# CONFIG_FEATURE_IPC_SYSLOG is not set +CONFIG_FEATURE_IPC_SYSLOG_BUFFER_SIZE=0 +# CONFIG_LOGREAD is not set +# CONFIG_FEATURE_LOGREAD_REDUCED_LOCKING is not set +# CONFIG_FEATURE_KMSG_SYSLOG is not set +# CONFIG_KLOGD is not set +# CONFIG_FEATURE_KLOGD_KLOGCTL is not set +# CONFIG_LOGGER is not set diff --git 
a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/bootstrap b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/bootstrap new file mode 100644 index 00000000..86c8d449 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/bootstrap @@ -0,0 +1,8 @@ +#!/bin/bash +set -e +set -x + +cd $(dirname $0)/.. + +apt-get update +apt-get install -y build-essential wget libncurses5-dev unzip bc curl python rsync ccache diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build new file mode 100644 index 00000000..3e4a510b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build @@ -0,0 +1,8 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +./scripts/download +./scripts/build-busybox-static +./scripts/package diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build-busybox-static b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build-busybox-static new file mode 100644 index 00000000..0a04a611 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/build-busybox-static @@ -0,0 +1,49 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +: ${ARTIFACTS:=$(pwd)/assets} +: ${BUILD:=$(pwd)/build} +: ${CONFIG:=$(pwd)/config} +: ${DIST:=$(pwd)/dist} + +mkdir -p ${BUILD} ${DIST} + +busybox_install() +{ + local conf=$1 + local bbconf=$2 + local target=$3 + + if [ "$#" = "2" ]; then + target=$2 + bbconf= + fi + + cd ${BUILD} + + local buildroot=$(ls -1 ${ARTIFACTS}/buildroot-*.tar.bz2) + + if [ ! -e "${buildroot}" ]; then + echo "Failed to find buildroot archive, found: ${buildroot}" 1>&2 + return 1 + else + buildroot=$(basename $buildroot) + fi + + if [ ! -e ${buildroot/.tar.bz2//} ]; then + tar xvjf ${ARTIFACTS}/${buildroot} + fi + + cd ${buildroot/.tar.bz2//} + + cp $conf .config + if [ -n "$bbconf" ]; then + cp $bbconf package/busybox/ + fi + make oldconfig + make +} + +busybox_install ${CONFIG}/buildroot-config-static ${CONFIG}/busybox-ps-modprobe-only.config ${DIST}/rootfs-static.tar diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/clean b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/clean new file mode 100644 index 00000000..e4882661 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/clean @@ -0,0 +1,5 @@ +#!/bin/bash + +cd $(dirname $0)/.. + +rm -rf build dist diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/download b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/download new file mode 100644 index 00000000..d4b938da --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/download @@ -0,0 +1,41 @@ +#!/bin/bash +set -e +set -x + +cd $(dirname $0)/.. + +: ${ARTIFACTS:=$(pwd)/assets} + +check() +{ + local hash=$1 + local file=$2 + + if [ ! -e "$file" ]; then + return 1 + fi + + CURRENT=$(sha1sum $file | awk '{print $1}') + + [ "$hash" = "$CURRENT" ] +} + +download() +{ + mkdir -p ${ARTIFACTS} + + local url=$2 + local file=${ARTIFACTS}/$(basename $2) + local hash=$1 + + if ! check $hash $file; then + curl -sL $url > $file + fi + + if !
check $hash $file; then + echo "ERROR: $file does not match checksum $hash, got $CURRENT" 1>&2 + return 1 + fi +} + +download 8efabafe68d21616c7f2ac9fdad8427fc94a015d http://buildroot.uclibc.org/downloads/buildroot-2015.05.tar.bz2 diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/package b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/package new file mode 100644 index 00000000..2d9d9f3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/base-image/scripts/package @@ -0,0 +1,34 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +rm -rf build/stage +mkdir -p build/stage +mkdir -p dist +pushd build/stage + +BASE=$(echo ../buildroot-*/output/target/) + +mkdir -p usr/bin usr/share usr/libexec/git-core + +cp $BASE/bin/busybox usr/bin +cp $BASE/usr/bin/xz usr/bin +cp $BASE/usr/bin/git usr/bin +cp $BASE/usr/bin/ssh usr/bin +cp $BASE/usr/sbin/xtables-multi usr/bin/iptables +cp -rf $BASE/usr/share/git-core usr/share + +ln -s ../../bin/git usr/libexec/git-core/git-clone +ln -s ../../bin/git usr/libexec/git-core/git-checkout +ln -s bin usr/sbin + +# Okay, this is a hack. I won't tell anyone if you don't. +# Honestly, who uses git submodules anyways... +echo '#!/usr/bin/busybox echo' > usr/libexec/git-core/git-submodule +chmod +x usr/libexec/git-core/git-submodule + +ln -s busybox usr/bin/ps +ln -s busybox usr/bin/modprobe + +tar cvzf ../../dist/base-files.tar.gz . diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/build.sh b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/build.sh new file mode 100644 index 00000000..d0176ecb --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/build.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +export NO_TEST=true +exec $(dirname $0)/scripts/ci diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/main/main.go b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/main/main.go new file mode 100644 index 00000000..8abbb40f --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/main/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "os" + + log "github.com/Sirupsen/logrus" + dockerlaunch "github.com/rancher/docker-from-scratch" +) + +func main() { + Main() +} + +func Main() { + if os.Getenv("DOCKER_LAUNCH_DEBUG") == "true" { + log.SetLevel(log.DebugLevel) + } + + if len(os.Args) < 2 { + log.Fatalf("Usage Example: %s /usr/bin/docker -d -D", os.Args[0]) + } + + args := []string{} + if len(os.Args) > 1 { + args = os.Args[2:] + } + + var config dockerlaunch.Config + args = dockerlaunch.ParseConfig(&config, args...) + + log.Debugf("Launch config %#v", config) + + _, err := dockerlaunch.LaunchDocker(&config, os.Args[1], args...) 
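+ // LaunchDocker returns a non-nil *exec.Cmd only when config.Fork is set;
+ // otherwise it execs the Docker daemon in place of this process and
+ // returns only on error, so err is the only useful result of this call.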
+ if err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scratch.go b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scratch.go new file mode 100644 index 00000000..281ff81d --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scratch.go @@ -0,0 +1,466 @@ +package dockerlaunch + +import ( + "bufio" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "strings" + "syscall" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libnetwork/resolvconf" + "github.com/rancher/docker-from-scratch/util" + "github.com/rancher/netconf" +) + +const defaultPrefix = "/usr" + +var ( + mounts [][]string = [][]string{ + {"devtmpfs", "/dev", "devtmpfs", ""}, + {"none", "/dev/pts", "devpts", ""}, + {"none", "/proc", "proc", ""}, + {"none", "/run", "tmpfs", ""}, + {"none", "/sys", "sysfs", ""}, + {"none", "/sys/fs/cgroup", "tmpfs", ""}, + } +) + +type Config struct { + Fork bool + CommandName string + DnsConfig netconf.DnsConfig + BridgeName string + BridgeAddress string + BridgeMtu int + CgroupHierarchy map[string]string + LogFile string + NoLog bool +} + +func createMounts(mounts ...[]string) error { + for _, mount := range mounts { + log.Debugf("Mounting %s %s %s %s", mount[0], mount[1], mount[2], mount[3]) + err := util.Mount(mount[0], mount[1], mount[2], mount[3]) + if err != nil { + return err + } + } + + return nil +} + +func createDirs(dirs ...string) error { + for _, dir := range dirs { + if _, err := os.Stat(dir); os.IsNotExist(err) { + log.Debugf("Creating %s", dir) + err = os.MkdirAll(dir, 0755) + if err != nil { + return err + } + } + } + + return nil +} + +func mountCgroups(hierarchyConfig map[string]string) error { + f, err := os.Open("/proc/cgroups") + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + + hierarchies := make(map[string][]string) + + for scanner.Scan() { + text := scanner.Text() + log.Debugf("/proc/cgroups: %s", text) + fields := strings.SplitN(text, "\t", 3) + cgroup := fields[0] + if cgroup == "" || cgroup[0] == '#' || len(fields) < 3 { + continue + } + + hierarchy := hierarchyConfig[cgroup] + if hierarchy == "" { + hierarchy = fields[1] + } + + if hierarchy == "0" { + hierarchy = cgroup + } + + hierarchies[hierarchy] = append(hierarchies[hierarchy], cgroup) + } + + for _, hierarchy := range hierarchies { + if err := mountCgroup(strings.Join(hierarchy, ",")); err != nil { + return err + } + } + + if err = scanner.Err(); err != nil { + return err + } + + log.Debug("Done mounting cgroupfs") + return nil +} + +func CreateSymlinks(pathSets [][]string) error { + for _, paths := range pathSets { + if err := CreateSymlink(paths[0], paths[1]); err != nil { + return err + } + } + + return nil +} + +func CreateSymlink(src, dest string) error { + if _, err := os.Lstat(dest); os.IsNotExist(err) { + log.Debugf("Symlinking %s => %s", src, dest) + if err = os.Symlink(src, dest); err != nil { + return err + } + } + + return nil +} + +func mountCgroup(cgroup string) error { + if err := createDirs("/sys/fs/cgroup/" + cgroup); err != nil { + return err + } + + if err := createMounts([][]string{{"none", "/sys/fs/cgroup/" + cgroup, "cgroup", cgroup}}...); err != nil { + return err + } + + parts := strings.Split(cgroup, ",") + if len(parts) > 1 { + for _, part := range parts { + if err := CreateSymlink("/sys/fs/cgroup/"+cgroup, "/sys/fs/cgroup/"+part); err != nil { + return err + } + } + } + + return nil +} + +func execDocker(config *Config, docker,
cmd string, args []string) (*exec.Cmd, error) { + if len(args) > 0 && args[0] == "docker" { + args = args[1:] + } + log.Debugf("Launching Docker %s %s %v", docker, cmd, args) + + if config.Fork { + cmd := exec.Command(docker, args...) + if !config.NoLog { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + err := cmd.Start() + return cmd, err + } else { + err := syscall.Exec(docker, append([]string{cmd}, args...), os.Environ()) + return nil, err + } +} + +func copyDefault(folder, name string) error { + defaultFile := path.Join(defaultPrefix, folder, name) + if err := CopyFile(defaultFile, folder, name); err != nil { + return err + } + + return nil +} + +func defaultFiles(files ...string) error { + for _, file := range files { + dir := path.Dir(file) + name := path.Base(file) + if err := copyDefault(dir, name); err != nil { + return err + } + } + + return nil +} + +func CopyFile(src, folder, name string) error { + if _, err := os.Stat(src); os.IsNotExist(err) { + return nil + } + + dst := path.Join(folder, name) + if _, err := os.Stat(dst); err == nil { + return nil + } + + if err := createDirs(folder); err != nil { + return err + } + + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.Create(dst) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + return err +} + +func tryCreateFile(name, content string) error { + if _, err := os.Stat(name); err == nil { + return nil + } + + if err := createDirs(path.Dir(name)); err != nil { + return err + } + + return ioutil.WriteFile(name, []byte(content), 0644) +} + +func createPasswd() error { + return tryCreateFile("/etc/passwd", "root:x:0:0:root:/root:/bin/sh\n") +} + +func createGroup() error { + return tryCreateFile("/etc/group", "root:x:0:\n") +} + +func setupNetworking(config *Config) error { + if config == nil { + return nil + } + + if len(config.DnsConfig.Nameservers) != 0 { + if err := resolvconf.Build("/etc/resolv.conf", config.DnsConfig.Nameservers, config.DnsConfig.Search); err != nil { + return err + } + } + + if config.BridgeName != "" { + log.Debugf("Creating bridge %s (%s)", config.BridgeName, config.BridgeAddress) + if err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{ + Interfaces: map[string]netconf.InterfaceConfig{ + config.BridgeName: { + Address: config.BridgeAddress, + MTU: config.BridgeMtu, + Bridge: true, + }, + }, + }); err != nil { + return err + } + } + + return nil +} + +func getValue(index int, args []string) string { + val := args[index] + parts := strings.SplitN(val, "=", 2) + if len(parts) == 1 { + if len(args) > index+1 { + return args[index+1] + } else { + return "" + } + } else { + return parts[1] + } +} + +func ParseConfig(config *Config, args ...string) []string { + for i, arg := range args { + if strings.HasPrefix(arg, "--bip") { + config.BridgeAddress = getValue(i, args) + } else if strings.HasPrefix(arg, "--fixed-cidr") { + config.BridgeAddress = getValue(i, args) + } else if strings.HasPrefix(arg, "-b") || strings.HasPrefix(arg, "--bridge") { + config.BridgeName = getValue(i, args) + } else if strings.HasPrefix(arg, "--mtu") { + mtu, err := strconv.Atoi(getValue(i, args)) + if err == nil { + config.BridgeMtu = mtu + } + } + } + + if config.BridgeName != "" && config.BridgeAddress != "" { + newArgs := []string{} + skip := false + for _, arg := range args { + if skip { + skip = false + continue + } + + if arg == "--bip" { + skip = true + continue + } else if strings.HasPrefix(arg, "--bip=") {
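+ // Both a bridge name and address are set, so dockerlaunch configures
+ // the bridge itself via netconf (see setupNetworking); --bip is stripped
+ // here so the flag is not also passed through to the Docker daemon.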
continue + } + + newArgs = append(newArgs, arg) + } + + args = newArgs + } + + return args +} + +func PrepareFs(config *Config) error { + if err := createMounts(mounts...); err != nil { + return err + } + + if err := mountCgroups(config.CgroupHierarchy); err != nil { + return err + } + + if err := createLayout(); err != nil { + return err + } + + return nil +} + +func touchSocket(path string) error { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return err + } + return ioutil.WriteFile(path, []byte{}, 0700) +} + +func touchSockets(args ...string) error { + touched := false + + for i, arg := range args { + if strings.HasPrefix(arg, "-H") { + val := getValue(i, args) + if strings.HasPrefix(val, "unix://") { + val = val[len("unix://"):] + log.Debugf("Creating temp file at %s", val) + if err := touchSocket(val); err != nil { + return err + } + touched = true + } + } + } + + if !touched { + return touchSocket("/var/run/docker.sock") + } + + return nil +} + +func createLayout() error { + if err := createDirs("/tmp", "/root/.ssh", "/var"); err != nil { + return err + } + + return CreateSymlinks([][]string{ + {"usr/lib", "/lib"}, + {"usr/sbin", "/sbin"}, + {"../run", "/var/run"}, + }) +} + +func prepare(config *Config, docker string, args ...string) error { + os.Setenv("PATH", "/sbin:/usr/sbin:/usr/bin") + + if err := defaultFiles( + "/etc/ssl/certs/ca-certificates.crt", + "/etc/passwd", + "/etc/group", + ); err != nil { + return err + } + + if err := createPasswd(); err != nil { + return err + } + + if err := createGroup(); err != nil { + return err + } + + if err := setupNetworking(config); err != nil { + return err + } + + if err := touchSockets(args...); err != nil { + return err + } + + if err := setupLogging(config); err != nil { + return err + } + + return nil +} + +func setupLogging(config *Config) error { + if config.LogFile == "" { + return nil + } + + if err := createDirs(path.Dir(config.LogFile)); err != nil { + return err + } + + output, err := os.Create(config.LogFile) + if err != nil { + return err + } + + syscall.Dup2(int(output.Fd()), int(os.Stdout.Fd())) + syscall.Dup2(int(output.Fd()), int(os.Stderr.Fd())) + + return nil +} + +func runOrExec(config *Config, docker string, args ...string) (*exec.Cmd, error) { + if err := prepare(config, docker, args...); err != nil { + return nil, err + } + + cmd := "docker" + if config != nil && config.CommandName != "" { + cmd = config.CommandName + } + + return execDocker(config, docker, cmd, args) +} + +func LaunchDocker(config *Config, docker string, args ...string) (*exec.Cmd, error) { + if err := PrepareFs(config); err != nil { + return nil, err + } + + return runOrExec(config, docker, args...) 
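+ // Usage sketch (values are illustrative, not defaults):
+ //
+ //   cmd, err := LaunchDocker(&Config{
+ //       Fork:          true,
+ //       BridgeName:    "docker0",
+ //       BridgeAddress: "172.17.42.1/16",
+ //   }, "/usr/bin/docker", "-d")
+ //
+ // With Fork unset this call would exec the daemon and never return.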
+} diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/Dockerfile.build b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/Dockerfile.build new file mode 100644 index 00000000..e5903798 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/Dockerfile.build @@ -0,0 +1,8 @@ +FROM golang:1.4.2-cross + +RUN go get github.com/mitchellh/gox +RUN go get github.com/tools/godep + +ENV GOPATH /go/src/github.com/rancher/docker-from-scratch/Godeps/_workspace:/go + +WORKDIR /go/src/github.com/rancher/docker-from-scratch diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/build b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/build new file mode 100644 index 00000000..9cb1805a --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/build @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +mkdir -p build + +if [ ! -e build/ca-certificates.crt ]; then + ID=$(docker run -d ubuntu sh -c "apt-get update && apt-get install -y ca-certificates") + docker logs -f $ID & + docker wait $ID + docker cp $ID:/etc/ssl/certs/ca-certificates.crt build/ + docker rm -vf $ID || true +fi + +mkdir -p build +docker build -f ./scripts/Dockerfile.build -t dockerscratch-build . + +docker run --rm -v `pwd`:/go/src/github.com/rancher/docker-from-scratch dockerscratch-build godep go build -ldflags "-linkmode external -extldflags -static" -o build/dockerlaunch ./main diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/ci b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/ci new file mode 100644 index 00000000..c7a5ca64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/ci @@ -0,0 +1,14 @@ +#!/bin/bash +set -e + +cd $(dirname $0) + +docker ps >/dev/null 2>&1 || wrapdocker 2>/dev/null || true + +./download +./build +./package + +if [ -z "$NO_TEST" ]; then + ./test +fi diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/common b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/common new file mode 100644 index 00000000..678d0155 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/common @@ -0,0 +1,2 @@ +VERSION=$(<./scripts/version) +IMAGE=${IMAGE:-rancher/docker:${VERSION}} diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/download b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/download new file mode 100644 index 00000000..bd2ca340 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/download @@ -0,0 +1,53 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +: ${ARTIFACTS:=$(pwd)/assets} +: ${BUILD:=$(pwd)/build} + +check() +{ + local hash=$1 + local file=$2 + + if [ ! -e "$file" ]; then + return 1 + fi + + CURRENT=$(shasum -a 1 $file | awk '{print $1}') + + [ "$hash" = "$CURRENT" ] +} + +download() +{ + mkdir -p ${ARTIFACTS} + + local url=$2 + local file=${ARTIFACTS}/$(basename $2) + local hash=$1 + + if ! check $hash $file; then + curl -sL $url > $file + fi + + if ! 
check $hash $file; then + echo "ERROR: $file does not match checksum $hash, got $CURRENT" 1>&2 + return 1 + fi +} + +mkdir -p ${BUILD} + +if [ -e base-image/dist/base-files.tar.gz ]; then + cp base-image/dist/base-files.tar.gz build/ +else + download f9e561b91bfdf6c4641e2bec8a69c9d4577b2ba2 https://github.com/rancher/docker-from-scratch/releases/download/bin-v0.1.0/base-files.tar.gz + cp assets/base-files.tar.gz build +fi + +download 1c5ed280185a17f1595084206e10c85599f15299 https://test.docker.com/builds/Linux/x86_64/docker-1.8.0-rc2 + +cp assets/docker-* build/docker +chmod +x build/docker diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/package b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/package new file mode 100644 index 00000000..7ebe7c39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/package @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +. ./scripts/common + +docker run --rm -v `pwd`:/go/src/github.com/rancher/docker-from-scratch dockerscratch-build strip --strip-all build/dockerlaunch +docker build -t $IMAGE -f Dockerfile . + +echo Built $IMAGE diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/test b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/test new file mode 100644 index 00000000..9a1c41e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/test @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +. ./scripts/common + +mkdir -p build/test-cache +ID=$(docker run -d --privileged -e DOCKER_LAUNCH_DEBUG=true -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) $IMAGE -d -s $(docker info | grep 'Storage Driver' | cut -f2 -d:)) +docker logs -f $ID & +trap "docker rm -fv $ID" EXIT +docker exec -i $ID docker build -t test-build git://github.com/rancher/tiny-build.git +docker exec -i $ID docker version +docker exec -i $ID docker info +docker exec -i $ID docker run test-build diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/version b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/version new file mode 100644 index 00000000..e0645eb7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/scripts/version @@ -0,0 +1 @@ +1.8.0-rc2 diff --git a/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/util/util_linux.go b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/util/util_linux.go new file mode 100644 index 00000000..e33d458c --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/docker-from-scratch/util/util_linux.go @@ -0,0 +1,41 @@ +// +build linux + +package util + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/mount" +) + +func mountProc() error { + if _, err := os.Stat("/proc/self/mountinfo"); os.IsNotExist(err) { + if _, err := os.Stat("/proc"); os.IsNotExist(err) { + if err = os.Mkdir("/proc", 0755); err != nil { + return err + } + } + + if err := syscall.Mount("none", "/proc", "proc", 0, ""); err != nil { + return err + } + } + + return nil +} + +func Mount(device, directory, fsType, options string) error { + if err := mountProc(); err != nil { + return err + } + + if _, err := os.Stat(directory); os.IsNotExist(err) { + err = os.MkdirAll(directory, 0755) + if err != nil { + return err + } + } + + return mount.Mount(device, directory, fsType, options) +} diff --git a/Godeps/_workspace/src/github.com/rancher/netconf/LICENSE
b/Godeps/_workspace/src/github.com/rancher/netconf/LICENSE new file mode 100644 index 00000000..e454a525 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rancher/netconf/LICENSE @@ -0,0 +1,178 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/Godeps/_workspace/src/github.com/rancher/netconf/README.md b/Godeps/_workspace/src/github.com/rancher/netconf/README.md
new file mode 100644
index 00000000..8d676d34
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/netconf/README.md
@@ -0,0 +1,3 @@
+# RancherOS Netconf
+
+Simple wrapper around various netlink calls to configure networking
diff --git a/Godeps/_workspace/src/github.com/rancher/netconf/ipv4ll.go b/Godeps/_workspace/src/github.com/rancher/netconf/ipv4ll.go
new file mode 100644
index 00000000..b7ed8ec3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/netconf/ipv4ll.go
@@ -0,0 +1,75 @@
+package netconf
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"net"
+
+	log "github.com/Sirupsen/logrus"
+
+	"github.com/j-keck/arping"
+	"github.com/vishvananda/netlink"
+)
+
+func AssignLinkLocalIP(link netlink.Link) error {
+	ifaceName := link.Attrs().Name
+	iface, err := net.InterfaceByName(ifaceName)
+	if err != nil {
+		log.Error("could not get information about interface")
+		return err
+	}
+	addrs, err := iface.Addrs()
+	if err != nil {
+		log.Error("Error fetching existing ip on interface")
+	}
+	for _, addr := range addrs {
+		if addr.String()[:7] == "169.254" {
+			log.Info("Link Local IP already set on interface")
+			return nil
+		}
+	}
+	randSource, err := getPseudoRandomGenerator(link.Attrs().HardwareAddr)
+	if err != nil {
+		return err
+	}
+	// try a random address up to 10 times
+	for i := 0; i < 10; i++ {
+		randGenerator := rand.New(*randSource)
+		randomNum := randGenerator.Uint32()
+		dstIP := getNewIPV4LLAddr(randomNum)
+		if dstIP[2] == 0 || dstIP[2] == 255 {
+			i--
+			continue
+		}
+		_, _, err := arping.PingOverIfaceByName(dstIP, ifaceName)
+		if err != nil {
+			// this ip is not being used
+			addr, err := netlink.ParseAddr(dstIP.String() + "/16")
+			if err != nil {
+				log.Errorf("error while parsing ipv4ll addr, err = %v", err)
+				return err
+			}
+			if err := netlink.AddrAdd(link, addr); err != nil {
+				log.Error("ipv4ll addr add failed")
+				return err
+			}
+			log.Infof("Set %s on %s", dstIP.String(), link.Attrs().Name)
+			return nil
+		}
+	}
+	log.Error("Could not find a suitable ipv4ll")
+	return fmt.Errorf("Could not find a suitable ipv4ll")
+}
+
+func getNewIPV4LLAddr(randomNum uint32) net.IP {
+	byte1 := randomNum & 255 // use least significant 8 bits
+	byte2 := randomNum >> 24 // use most significant 8 bits
+	return []byte{169, 254, byte(byte1), byte(byte2)}
+}
+
+func getPseudoRandomGenerator(haAddr []byte) (*rand.Source, error) {
+	seed, _ := binary.Varint(haAddr)
+	src := rand.NewSource(seed)
+	return &src, nil
+}
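AssignLinkLocalIP is exported on its own, separate from the config-driven path below. A rough sketch of direct use, assuming a link fetched by name (eth0 is illustrative):

package main

import (
	"log"

	"github.com/rancher/netconf"
	"github.com/vishvananda/netlink"
)

func main() {
	// Illustrative: look up eth0 and let netconf probe for a free
	// 169.254.x.y address via ARP before assigning it.
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}
	if err := netconf.AssignLinkLocalIP(link); err != nil {
		log.Fatal(err)
	}
}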
diff --git a/Godeps/_workspace/src/github.com/rancher/netconf/netconf.go b/Godeps/_workspace/src/github.com/rancher/netconf/netconf.go
new file mode 100644
index 00000000..f8f4efea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/netconf/netconf.go
@@ -0,0 +1,161 @@
+package netconf
+
+import (
+	"bytes"
+	"errors"
+	"net"
+	"os"
+	"os/exec"
+	"strings"
+
+	log "github.com/Sirupsen/logrus"
+
+	"github.com/ryanuber/go-glob"
+	"github.com/vishvananda/netlink"
+)
+
+func createInterfaces(netCfg *NetworkConfig) error {
+	for name, iface := range netCfg.Interfaces {
+		if !iface.Bridge {
+			continue
+		}
+
+		bridge := netlink.Bridge{}
+		bridge.LinkAttrs.Name = name
+
+		if err := netlink.LinkAdd(&bridge); err != nil {
+			log.Errorf("Failed to create bridge %s: %v", name, err)
+		}
+	}
+
+	return nil
+}
+
+func ApplyNetworkConfigs(netCfg *NetworkConfig) error {
+	log.Debugf("Config: %#v", netCfg)
+	if err := createInterfaces(netCfg); err != nil {
+		return err
+	}
+
+	links, err := netlink.LinkList()
+	if err != nil {
+		return err
+	}
+
+	// apply network config
+	for _, link := range links {
+		linkName := link.Attrs().Name
+		var match InterfaceConfig
+
+		for key, netConf := range netCfg.Interfaces {
+			if netConf.Match == "" {
+				netConf.Match = key
+			}
+
+			if netConf.Match == "" {
+				continue
+			}
+
+			if len(netConf.Match) > 4 && strings.ToLower(netConf.Match[:3]) == "mac" {
+				haAddr, err := net.ParseMAC(netConf.Match[4:])
+				if err != nil {
+					return err
+				}
+				if bytes.Compare(haAddr, link.Attrs().HardwareAddr) == 0 {
+					// MAC address match is used over all other matches
+					match = netConf
+					break
+				}
+			}
+
+			// "" means match has not been found
+			if match.Match == "" && matches(linkName, netConf.Match) {
+				match = netConf
+			}
+
+			if netConf.Match == linkName {
+				// Found exact match, use it over wildcard match
+				match = netConf
+			}
+		}
+
+		if match.Match != "" {
+			err = applyNetConf(link, match)
+			if err != nil {
+				log.Errorf("Failed to apply settings to %s: %v", linkName, err)
+			}
+		}
+	}
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func applyNetConf(link netlink.Link, netConf InterfaceConfig) error {
+	if netConf.DHCP {
+		log.Infof("Running DHCP on %s", link.Attrs().Name)
+		cmd := exec.Command("dhcpcd", "-A4", "-e", "force_hostname=true", link.Attrs().Name)
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		if err := cmd.Run(); err != nil {
+			log.Error(err)
+		}
+	} else if netConf.IPV4LL {
+		if err := AssignLinkLocalIP(link); err != nil {
+			log.Errorf("IPV4LL set failed: %v", err)
+			return err
+		}
+	} else if netConf.Address == "" {
+		return nil
+	} else {
+		addr, err := netlink.ParseAddr(netConf.Address)
+		if err != nil {
+			return err
+		}
+		if err := netlink.AddrAdd(link, addr); err != nil {
+			// ignore this error
+			log.Errorf("addr add failed: %v", err)
+		} else {
+			log.Infof("Set %s on %s", netConf.Address, link.Attrs().Name)
+		}
+	}
+
+	if netConf.MTU > 0 {
+		if err := netlink.LinkSetMTU(link, netConf.MTU); err != nil {
+			log.Errorf("set MTU failed: %v", err)
+			return err
+		}
+	}
+
+	if err := netlink.LinkSetUp(link); err != nil {
+		log.Errorf("failed to set up link: %v", err)
+		return err
+	}
+
+	if netConf.Gateway != "" {
+		gatewayIp := net.ParseIP(netConf.Gateway)
+		if gatewayIp == nil {
+			return errors.New("Invalid gateway address " + netConf.Gateway)
+		}
+
+		route := netlink.Route{
+			Scope: netlink.SCOPE_UNIVERSE,
+			Gw:    net.ParseIP(netConf.Gateway),
+		}
+		if err := netlink.RouteAdd(&route); err != nil {
+			log.Errorf("gateway set failed: %v", err)
+			return err
+		}
+
+		log.Infof("Set default gateway %s", netConf.Gateway)
+	}
+
+	return nil
+}
+
+func matches(link, conf string) bool {
+	return glob.Glob(conf, link)
+}
diff --git a/Godeps/_workspace/src/github.com/rancher/netconf/types.go b/Godeps/_workspace/src/github.com/rancher/netconf/types.go
new file mode 100644
index 00000000..b278f493
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/rancher/netconf/types.go
@@ -0,0 +1,21 @@
+package netconf
+
+type NetworkConfig struct {
+	Dns        DnsConfig                  `yaml:"dns,omitempty"`
+	Interfaces map[string]InterfaceConfig `yaml:"interfaces,omitempty"`
+}
+
+type InterfaceConfig struct {
+	Match   string `yaml:"match,omitempty"`
+	DHCP    bool   `yaml:"dhcp,omitempty"`
+	Address string `yaml:"address,omitempty"`
+	IPV4LL  bool   `yaml:"ipv4ll,omitempty"`
+	Gateway string `yaml:"gateway,omitempty"`
+	MTU     int    `yaml:"mtu,omitempty"`
+	Bridge  bool   `yaml:"bridge,omitempty"`
+}
+
+type DnsConfig struct {
+	Nameservers []string `yaml:"nameservers,flow,omitempty"`
+	Search      []string `yaml:"search,flow,omitempty"`
+}
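These three types are the whole public schema of the vendored netconf package; the yaml tags describe the config document a user would write. A minimal sketch of a consumer building the same structure programmatically and handing it to ApplyNetworkConfigs (interface names, addresses, and MTU are illustrative):

package main

import (
	"log"

	"github.com/rancher/netconf"
)

func main() {
	// Illustrative only: DHCP on eth0, a static address plus default
	// gateway on eth1. Field names follow the types defined above.
	cfg := &netconf.NetworkConfig{
		Interfaces: map[string]netconf.InterfaceConfig{
			"eth0": {DHCP: true},
			"eth1": {
				Address: "192.168.1.10/24",
				Gateway: "192.168.1.1",
				MTU:     1500,
			},
		},
	}
	if err := netconf.ApplyNetworkConfigs(cfg); err != nil {
		log.Fatal(err)
	}
}

The matching logic above would apply the eth1 entry by exact name match; entries whose match field is a wildcard fall back to glob matching, and a "mac=..." match takes precedence over both.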
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client.go
deleted file mode 100644
index 0b14ada3..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package client
-
-type RancherBaseClient struct {
-	Opts    *ClientOpts
-	Schemas *Schemas
-	Types   map[string]Schema
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client_test.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client_test.go
deleted file mode 100644
index 92110348..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/client_test.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package client
-
-import (
-	"testing"
-	"time"
-)
-
-const (
-	URL        = "http://localhost:8080/v1"
-	ACCESS_KEY = "admin"
-	SECRET_KEY = "adminpass"
-	MAX_WAIT   = time.Duration(time.Second * 10)
-)
-
-func newClient(t *testing.T) *RancherClient {
-	client, err := NewRancherClient(&ClientOpts{
-		Url:       URL,
-		AccessKey: ACCESS_KEY,
-		SecretKey: SECRET_KEY,
-	})
-
-	if err != nil {
-		t.Fatal("Failed to create client", err)
-	}
-
-	return client
-}
-
-func TestClientLoad(t *testing.T) {
-	client := newClient(t)
-	if client.Schemas == nil {
-		t.Fatal("Failed to load schema")
-	}
-
-	if len(client.Schemas.Data) == 0 {
-		t.Fatal("Schemas is empty")
-	}
-
-	if _, ok := client.Types["container"]; !ok {
-		t.Fatal("Failed to find container type")
-	}
-}
-
-func TestContainerList(t *testing.T) {
-	client := newClient(t)
-
-	/* Create a container to ensure list will return something */
-	container, err := client.Container.Create(&Container{
-		Name:      "a name",
-		ImageUuid: "docker:nginx",
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	defer client.Container.Delete(container)
-
-	containers, err := client.Container.List(nil)
-
-	if err != nil {
-		t.Fatal("Failed to list containers", err)
-	}
-
-	if len(containers.Data) == 0 {
-		t.Fatal("No containers found")
-	}
-
-	if len(containers.Data[0].Id) == 0 {
-		t.Fatal("Container ID is not set")
-	}
-
-	listOpts := NewListOpts()
-	listOpts.Filters["id"] = "comeBackEmpty"
-	containers, err = client.Container.List(listOpts)
-
-	if err != nil {
-		t.Fatal("Failed to list containers", err)
-	}
-
-	if len(containers.Data) != 0 {
-		t.Fatal("Filter should have found no contianers.")
-	}
-}
-
-func TestContainerCreate(t *testing.T) {
-	client := newClient(t)
-	container, err := client.Container.Create(&Container{
-		Name:      "a name",
-		ImageUuid: "docker:nginx",
-	})
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	defer client.Container.Delete(container)
-
-	if container.Name != "a name" {
-		t.Fatal("Field name is wrong [" + container.Name + "]")
-	}
-
-	if container.ImageUuid != "docker:nginx" {
-		t.Fatal("Field imageUuid is wrong [" + container.ImageUuid + "]")
-	}
-}
-
-func TestContainerUpdate(t *testing.T) {
-	client := newClient(t)
-	container, err := client.Container.Create(&Container{
-		Name:      "a name",
-		ImageUuid: "docker:nginx",
-	})
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	defer client.Container.Delete(container)
-
-	if container.Name != "a name" {
-		t.Fatal("Field name is wrong [" + container.Name + "]")
-	}
-
-	container, err = client.Container.Update(container, &Container{
-		Name: "a different name",
-	})
-
-	if container.Name != "a different name" {
-		t.Fatal("Field name is wrong [" + container.Name
+ "]") - } - - by_id_container, err := client.Container.ById(string(container.Id)) - if err != nil { - t.Fatal(err) - } - - if by_id_container.Id != container.Id { - t.Fatal("Container from by ID did not match") - } - - if by_id_container.Name != container.Name { - t.Fatal("Container from by ID did not match for name") - } -} - -func TestContainerDelete(t *testing.T) { - client := newClient(t) - container, err := client.Container.Create(&Container{ - Name: "a name", - ImageUuid: "docker:nginx", - }) - - if err != nil { - t.Fatal(err) - } - - err = client.Container.Delete(container) - if err != nil { - t.Fatal("Failed to delete", err) - } -} - -func TestContainerNotExists(t *testing.T) { - client := newClient(t) - _, err := client.Container.ById("badId1") - if err == nil { - t.Fatal("Should have received an error getting non-existent container.") - } - - apiError, ok := err.(*ApiError) - if !ok { - t.Fatal("Should have received an ApiError.") - } - if apiError.StatusCode != 404 { - t.Fatal("Should have received a 404 and reported it on the ApiError.") - } -} - -func TestAccountAction(t *testing.T) { - client := newClient(t) - account, err := client.Account.Create(&Account{ - Name: "a name", - }) - - if err != nil { - t.Fatal(err) - } - - defer client.Account.Delete(account) - - account = waitAccountTransition(account, client, t) - if account.State == "inactive" { - t.Fatal("Account shouldnt be inactive.") - } - - account, err = client.Account.ActionDeactivate(account) - if err != nil { - t.Fatal(err) - } - - account = waitAccountTransition(account, client, t) - if account.State != "inactive" { - t.Fatal("Account didnt deactivate") - } -} - -func TestPublishCreate(t *testing.T) { - client := newClient(t) - _, err := client.Publish.Create(&Publish{ - Name: "foo", - }) - - if err != nil { - t.Fatal(err) - } -} - -func waitAccountTransition(account *Account, client *RancherClient, t *testing.T) *Account { - timeoutAt := time.Now().Add(MAX_WAIT) - ticker := time.NewTicker(time.Millisecond * 250) - defer ticker.Stop() - for tick := range ticker.C { - account, err := client.Account.ById(account.Id) - if err != nil { - t.Fatal("Couldn't get account") - } - if account.Transitioning != "yes" { - return account - } - if tick.After(timeoutAt) { - t.Fatal("Timed out waiting for account to activate.") - } - } - t.Fatal("Timed out waiting for account to activate.") - return nil -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/common.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/common.go deleted file mode 100644 index 88de8c61..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/common.go +++ /dev/null @@ -1,418 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" -) - -const ( - SELF = "self" - COLLECTION = "collection" -) - -type ClientOpts struct { - Url string - AccessKey string - SecretKey string -} - -type ApiError struct { - StatusCode int - Url string - Msg string - Status string - Body string -} - -func (e *ApiError) Error() string { - return e.Msg -} - -func newApiError(resp *http.Response, url string) *ApiError { - contents, err := ioutil.ReadAll(resp.Body) - var body string - if err != nil { - body = "Unreadable body." - } else { - body = string(contents) - } - formattedMsg := fmt.Sprintf("Bad response from [%s], statusCode [%d]. Status [%s]. 
Body: [%s]", - url, resp.StatusCode, resp.Status, body) - return &ApiError{ - Url: url, - Msg: formattedMsg, - StatusCode: resp.StatusCode, - Status: resp.Status, - Body: body, - } -} - -func contains(array []string, item string) bool { - for _, check := range array { - if check == item { - return true - } - } - - return false -} - -func appendFilters(urlString string, filters map[string]interface{}) (string, error) { - if len(filters) == 0 { - return urlString, nil - } - - u, err := url.Parse(urlString) - if err != nil { - return "", err - } - - q := u.Query() - for k, v := range filters { - q.Add(k, fmt.Sprintf("%v", v)) - } - - u.RawQuery = q.Encode() - return u.String(), nil -} - -func setupRancherBaseClient(rancherClient *RancherBaseClient, opts *ClientOpts) error { - client := &http.Client{} - req, err := http.NewRequest("GET", opts.Url, nil) - if err != nil { - return err - } - - req.SetBasicAuth(opts.AccessKey, opts.SecretKey) - - resp, err := client.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return newApiError(resp, opts.Url) - } - - schemasUrls := resp.Header.Get("X-API-Schemas") - if len(schemasUrls) == 0 { - return errors.New("Failed to find schema at [" + opts.Url + "]") - } - - if schemasUrls != opts.Url { - req, err = http.NewRequest("GET", schemasUrls, nil) - req.SetBasicAuth(opts.AccessKey, opts.SecretKey) - if err != nil { - return err - } - - resp, err = client.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return newApiError(resp, opts.Url) - } - } - - var schemas Schemas - bytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - err = json.Unmarshal(bytes, &schemas) - if err != nil { - return err - } - - rancherClient.Opts = opts - rancherClient.Schemas = &schemas - - for _, schema := range schemas.Data { - rancherClient.Types[schema.Id] = schema - } - - return nil -} - -func NewListOpts() *ListOpts { - return &ListOpts{ - Filters: map[string]interface{}{}, - } -} - -func (rancherClient *RancherBaseClient) setupRequest(req *http.Request) { - req.SetBasicAuth(rancherClient.Opts.AccessKey, rancherClient.Opts.SecretKey) -} - -func (rancherClient *RancherBaseClient) newHttpClient() *http.Client { - return &http.Client{} -} - -func (rancherClient *RancherBaseClient) doDelete(url string) error { - client := rancherClient.newHttpClient() - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return err - } - - rancherClient.setupRequest(req) - - resp, err := client.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode >= 300 { - return newApiError(resp, url) - } - - return nil -} - -func (rancherClient *RancherBaseClient) doGet(url string, opts *ListOpts, respObject interface{}) error { - if opts == nil { - opts = NewListOpts() - } - url, err := appendFilters(url, opts.Filters) - if err != nil { - return err - } - - client := rancherClient.newHttpClient() - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return err - } - - rancherClient.setupRequest(req) - - resp, err := client.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return newApiError(resp, url) - } - - byteContent, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - return json.Unmarshal(byteContent, respObject) -} - -func (rancherClient *RancherBaseClient) doList(schemaType string, opts *ListOpts, respObject interface{}) error { 
- schema, ok := rancherClient.Types[schemaType] - if !ok { - return errors.New("Unknown schema type [" + schemaType + "]") - } - - if !contains(schema.CollectionMethods, "GET") { - return errors.New("Resource type [" + schemaType + "] is not listable") - } - - collectionUrl, ok := schema.Links[COLLECTION] - if !ok { - return errors.New("Failed to find collection URL for [" + schemaType + "]") - } - - return rancherClient.doGet(collectionUrl, opts, respObject) -} - -func (rancherClient *RancherBaseClient) doModify(method string, url string, createObj interface{}, respObject interface{}) error { - bodyContent, err := json.Marshal(createObj) - if err != nil { - return err - } - - client := rancherClient.newHttpClient() - req, err := http.NewRequest(method, url, bytes.NewBuffer(bodyContent)) - if err != nil { - return err - } - - rancherClient.setupRequest(req) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Content-Length", string(len(bodyContent))) - - resp, err := client.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode >= 300 { - return newApiError(resp, url) - } - - byteContent, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - if len(byteContent) > 0 { - return json.Unmarshal(byteContent, respObject) - } - return nil -} - -func (rancherClient *RancherBaseClient) doCreate(schemaType string, createObj interface{}, respObject interface{}) error { - if createObj == nil { - createObj = map[string]string{} - } - schema, ok := rancherClient.Types[schemaType] - if !ok { - return errors.New("Unknown schema type [" + schemaType + "]") - } - - if !contains(schema.CollectionMethods, "POST") { - return errors.New("Resource type [" + schemaType + "] is not creatable") - } - - var collectionUrl string - collectionUrl, ok = schema.Links[COLLECTION] - if !ok { - // return errors.New("Failed to find collection URL for [" + schemaType + "]") - // This is a hack to address https://github.com/rancherio/cattle/issues/254 - re := regexp.MustCompile("schemas.*") - collectionUrl = re.ReplaceAllString(schema.Links[SELF], schema.PluralName) - } - - return rancherClient.doModify("POST", collectionUrl, createObj, respObject) -} - -func (rancherClient *RancherBaseClient) doUpdate(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error { - if existing == nil { - return errors.New("Existing object is nil") - } - - selfUrl, ok := existing.Links[SELF] - if !ok { - return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing)) - } - - if updates == nil { - updates = map[string]string{} - } - - schema, ok := rancherClient.Types[schemaType] - if !ok { - return errors.New("Unknown schema type [" + schemaType + "]") - } - - if !contains(schema.ResourceMethods, "PUT") { - return errors.New("Resource type [" + schemaType + "] is not updatable") - } - - return rancherClient.doModify("PUT", selfUrl, updates, respObject) -} - -func (rancherClient *RancherBaseClient) doById(schemaType string, id string, respObject interface{}) error { - schema, ok := rancherClient.Types[schemaType] - if !ok { - return errors.New("Unknown schema type [" + schemaType + "]") - } - - if !contains(schema.ResourceMethods, "GET") { - return errors.New("Resource type [" + schemaType + "] can not be looked up by ID") - } - - collectionUrl, ok := schema.Links[COLLECTION] - if !ok { - return errors.New("Failed to find collection URL for [" + schemaType + "]") - } - - err := rancherClient.doGet(collectionUrl+"/"+id, nil, 
respObject) - //TODO check for 404 and return nil, nil - return err -} - -func (rancherClient *RancherBaseClient) doResourceDelete(schemaType string, existing *Resource) error { - schema, ok := rancherClient.Types[schemaType] - if !ok { - return errors.New("Unknown schema type [" + schemaType + "]") - } - - if !contains(schema.ResourceMethods, "DELETE") { - return errors.New("Resource type [" + schemaType + "] can not be deleted") - } - - selfUrl, ok := existing.Links[SELF] - if !ok { - return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing)) - } - - return rancherClient.doDelete(selfUrl) -} - -func (rancherClient *RancherBaseClient) doEmptyAction(schemaType string, action string, - existing *Resource, respObject interface{}) error { - // TODO Actions with inputs currently not supported. - - if existing == nil { - return errors.New("Existing object is nil") - } - - actionUrl, ok := existing.Actions[action] - if !ok { - return errors.New(fmt.Sprintf("Action [%v] not available on [%v]", action, existing)) - } - - schema, ok := rancherClient.Types[schemaType] - if !ok { - return errors.New("Unknown schema type [" + schemaType + "]") - } - - if schema.ResourceActions[action].Input != "" { - return fmt.Errorf("Actions with inputs or outputs not yet support. Input: [%v] Output: [%v].", - schema.ResourceActions[action].Input) - } - - client := rancherClient.newHttpClient() - req, err := http.NewRequest("POST", actionUrl, nil) - if err != nil { - return err - } - - rancherClient.setupRequest(req) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Content-Length", "0") - - resp, err := client.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode >= 300 { - return newApiError(resp, actionUrl) - } - - byteContent, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - return json.Unmarshal(byteContent, respObject) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_account.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_account.go deleted file mode 100644 index ba307cd1..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_account.go +++ /dev/null @@ -1,138 +0,0 @@ -package client - -const ( - ACCOUNT_TYPE = "account" -) - -type Account struct { - Resource - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - ExternalId string `json:"externalId,omitempty"` - - ExternalIdType string `json:"externalIdType,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type AccountCollection struct { - Collection - Data []Account `json:"data,omitempty"` -} - -type AccountClient struct { - rancherClient *RancherClient -} - -type AccountOperations interface { - List(opts *ListOpts) (*AccountCollection, error) - Create(opts *Account) (*Account, error) - Update(existing *Account, updates interface{}) (*Account, error) - ById(id string) (*Account, error) - Delete(container *Account) error - ActionActivate 
(*Account) (*Account, error) - ActionCreate (*Account) (*Account, error) - ActionDeactivate (*Account) (*Account, error) - ActionPurge (*Account) (*Account, error) - ActionRemove (*Account) (*Account, error) - ActionRestore (*Account) (*Account, error) - ActionUpdate (*Account) (*Account, error) -} - -func newAccountClient(rancherClient *RancherClient) *AccountClient { - return &AccountClient{ - rancherClient: rancherClient, - } -} - -func (c *AccountClient) Create(container *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doCreate(ACCOUNT_TYPE, container, resp) - return resp, err -} - -func (c *AccountClient) Update(existing *Account, updates interface{}) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doUpdate(ACCOUNT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *AccountClient) List(opts *ListOpts) (*AccountCollection, error) { - resp := &AccountCollection{} - err := c.rancherClient.doList(ACCOUNT_TYPE, opts, resp) - return resp, err -} - -func (c *AccountClient) ById(id string) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doById(ACCOUNT_TYPE, id, resp) - return resp, err -} - -func (c *AccountClient) Delete(container *Account) error { - return c.rancherClient.doResourceDelete(ACCOUNT_TYPE, &container.Resource) -} - -func (c *AccountClient) ActionActivate(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *AccountClient) ActionCreate(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *AccountClient) ActionDeactivate(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *AccountClient) ActionPurge(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *AccountClient) ActionRemove(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *AccountClient) ActionRestore(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *AccountClient) ActionUpdate(resource *Account) (*Account, error) { - resp := &Account{} - err := c.rancherClient.doEmptyAction(ACCOUNT_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_active_setting.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_active_setting.go deleted file mode 100644 index 029513be..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_active_setting.go +++ /dev/null @@ -1,71 +0,0 @@ -package client - -const ( - ACTIVE_SETTING_TYPE = "activeSetting" -) - -type ActiveSetting struct { - Resource - - ActiveValue interface{} `json:"activeValue,omitempty"` - - InDb bool `json:"inDb,omitempty"` - - Name string `json:"name,omitempty"` - - Source string `json:"source,omitempty"` - - Value string `json:"value,omitempty"` - -} - -type 
ActiveSettingCollection struct { - Collection - Data []ActiveSetting `json:"data,omitempty"` -} - -type ActiveSettingClient struct { - rancherClient *RancherClient -} - -type ActiveSettingOperations interface { - List(opts *ListOpts) (*ActiveSettingCollection, error) - Create(opts *ActiveSetting) (*ActiveSetting, error) - Update(existing *ActiveSetting, updates interface{}) (*ActiveSetting, error) - ById(id string) (*ActiveSetting, error) - Delete(container *ActiveSetting) error -} - -func newActiveSettingClient(rancherClient *RancherClient) *ActiveSettingClient { - return &ActiveSettingClient{ - rancherClient: rancherClient, - } -} - -func (c *ActiveSettingClient) Create(container *ActiveSetting) (*ActiveSetting, error) { - resp := &ActiveSetting{} - err := c.rancherClient.doCreate(ACTIVE_SETTING_TYPE, container, resp) - return resp, err -} - -func (c *ActiveSettingClient) Update(existing *ActiveSetting, updates interface{}) (*ActiveSetting, error) { - resp := &ActiveSetting{} - err := c.rancherClient.doUpdate(ACTIVE_SETTING_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ActiveSettingClient) List(opts *ListOpts) (*ActiveSettingCollection, error) { - resp := &ActiveSettingCollection{} - err := c.rancherClient.doList(ACTIVE_SETTING_TYPE, opts, resp) - return resp, err -} - -func (c *ActiveSettingClient) ById(id string) (*ActiveSetting, error) { - resp := &ActiveSetting{} - err := c.rancherClient.doById(ACTIVE_SETTING_TYPE, id, resp) - return resp, err -} - -func (c *ActiveSettingClient) Delete(container *ActiveSetting) error { - return c.rancherClient.doResourceDelete(ACTIVE_SETTING_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_load_balancer_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_load_balancer_input.go deleted file mode 100644 index 6f5f61f1..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_load_balancer_input.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - ADD_LOAD_BALANCER_INPUT_TYPE = "addLoadBalancerInput" -) - -type AddLoadBalancerInput struct { - Resource - - LoadBalancerId string `json:"loadBalancerId,omitempty"` - - Weight int `json:"weight,omitempty"` - -} - -type AddLoadBalancerInputCollection struct { - Collection - Data []AddLoadBalancerInput `json:"data,omitempty"` -} - -type AddLoadBalancerInputClient struct { - rancherClient *RancherClient -} - -type AddLoadBalancerInputOperations interface { - List(opts *ListOpts) (*AddLoadBalancerInputCollection, error) - Create(opts *AddLoadBalancerInput) (*AddLoadBalancerInput, error) - Update(existing *AddLoadBalancerInput, updates interface{}) (*AddLoadBalancerInput, error) - ById(id string) (*AddLoadBalancerInput, error) - Delete(container *AddLoadBalancerInput) error -} - -func newAddLoadBalancerInputClient(rancherClient *RancherClient) *AddLoadBalancerInputClient { - return &AddLoadBalancerInputClient{ - rancherClient: rancherClient, - } -} - -func (c *AddLoadBalancerInputClient) Create(container *AddLoadBalancerInput) (*AddLoadBalancerInput, error) { - resp := &AddLoadBalancerInput{} - err := c.rancherClient.doCreate(ADD_LOAD_BALANCER_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *AddLoadBalancerInputClient) Update(existing *AddLoadBalancerInput, updates interface{}) (*AddLoadBalancerInput, error) { - resp := &AddLoadBalancerInput{} - err := c.rancherClient.doUpdate(ADD_LOAD_BALANCER_INPUT_TYPE, &existing.Resource, 
updates, resp) - return resp, err -} - -func (c *AddLoadBalancerInputClient) List(opts *ListOpts) (*AddLoadBalancerInputCollection, error) { - resp := &AddLoadBalancerInputCollection{} - err := c.rancherClient.doList(ADD_LOAD_BALANCER_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *AddLoadBalancerInputClient) ById(id string) (*AddLoadBalancerInput, error) { - resp := &AddLoadBalancerInput{} - err := c.rancherClient.doById(ADD_LOAD_BALANCER_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *AddLoadBalancerInputClient) Delete(container *AddLoadBalancerInput) error { - return c.rancherClient.doResourceDelete(ADD_LOAD_BALANCER_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_cluster_host_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_cluster_host_input.go deleted file mode 100644 index 574e66f3..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_cluster_host_input.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - ADD_REMOVE_CLUSTER_HOST_INPUT_TYPE = "addRemoveClusterHostInput" -) - -type AddRemoveClusterHostInput struct { - Resource - - HostId string `json:"hostId,omitempty"` - -} - -type AddRemoveClusterHostInputCollection struct { - Collection - Data []AddRemoveClusterHostInput `json:"data,omitempty"` -} - -type AddRemoveClusterHostInputClient struct { - rancherClient *RancherClient -} - -type AddRemoveClusterHostInputOperations interface { - List(opts *ListOpts) (*AddRemoveClusterHostInputCollection, error) - Create(opts *AddRemoveClusterHostInput) (*AddRemoveClusterHostInput, error) - Update(existing *AddRemoveClusterHostInput, updates interface{}) (*AddRemoveClusterHostInput, error) - ById(id string) (*AddRemoveClusterHostInput, error) - Delete(container *AddRemoveClusterHostInput) error -} - -func newAddRemoveClusterHostInputClient(rancherClient *RancherClient) *AddRemoveClusterHostInputClient { - return &AddRemoveClusterHostInputClient{ - rancherClient: rancherClient, - } -} - -func (c *AddRemoveClusterHostInputClient) Create(container *AddRemoveClusterHostInput) (*AddRemoveClusterHostInput, error) { - resp := &AddRemoveClusterHostInput{} - err := c.rancherClient.doCreate(ADD_REMOVE_CLUSTER_HOST_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *AddRemoveClusterHostInputClient) Update(existing *AddRemoveClusterHostInput, updates interface{}) (*AddRemoveClusterHostInput, error) { - resp := &AddRemoveClusterHostInput{} - err := c.rancherClient.doUpdate(ADD_REMOVE_CLUSTER_HOST_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *AddRemoveClusterHostInputClient) List(opts *ListOpts) (*AddRemoveClusterHostInputCollection, error) { - resp := &AddRemoveClusterHostInputCollection{} - err := c.rancherClient.doList(ADD_REMOVE_CLUSTER_HOST_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *AddRemoveClusterHostInputClient) ById(id string) (*AddRemoveClusterHostInput, error) { - resp := &AddRemoveClusterHostInput{} - err := c.rancherClient.doById(ADD_REMOVE_CLUSTER_HOST_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *AddRemoveClusterHostInputClient) Delete(container *AddRemoveClusterHostInput) error { - return c.rancherClient.doResourceDelete(ADD_REMOVE_CLUSTER_HOST_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_host_input.go 
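Every one of these deleted generated_*.go files repeats the same typed CRUD surface over the base client's doCreate/doUpdate/doList/doById/doResourceDelete helpers. A sketch of the calling pattern they supported, assuming a reachable API endpoint (the URL and keys are placeholders mirroring the constants in the deleted client_test.go, and Container stands in for any of the generated resource clients):

package main

import (
	"log"

	"github.com/rancherio/go-rancher/client"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := client.NewRancherClient(&client.ClientOpts{
		Url:       "http://localhost:8080/v1",
		AccessKey: "admin",
		SecretKey: "adminpass",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each resource type exposes the same List/Create/Update/ById/Delete
	// operations behind a typed field on the client.
	container, err := c.Container.Create(&client.Container{
		Name:      "example",
		ImageUuid: "docker:nginx",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Container.Delete(container)
}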
b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_host_input.go deleted file mode 100644 index 79b21738..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_host_input.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - ADD_REMOVE_LOAD_BALANCER_HOST_INPUT_TYPE = "addRemoveLoadBalancerHostInput" -) - -type AddRemoveLoadBalancerHostInput struct { - Resource - - HostId string `json:"hostId,omitempty"` - -} - -type AddRemoveLoadBalancerHostInputCollection struct { - Collection - Data []AddRemoveLoadBalancerHostInput `json:"data,omitempty"` -} - -type AddRemoveLoadBalancerHostInputClient struct { - rancherClient *RancherClient -} - -type AddRemoveLoadBalancerHostInputOperations interface { - List(opts *ListOpts) (*AddRemoveLoadBalancerHostInputCollection, error) - Create(opts *AddRemoveLoadBalancerHostInput) (*AddRemoveLoadBalancerHostInput, error) - Update(existing *AddRemoveLoadBalancerHostInput, updates interface{}) (*AddRemoveLoadBalancerHostInput, error) - ById(id string) (*AddRemoveLoadBalancerHostInput, error) - Delete(container *AddRemoveLoadBalancerHostInput) error -} - -func newAddRemoveLoadBalancerHostInputClient(rancherClient *RancherClient) *AddRemoveLoadBalancerHostInputClient { - return &AddRemoveLoadBalancerHostInputClient{ - rancherClient: rancherClient, - } -} - -func (c *AddRemoveLoadBalancerHostInputClient) Create(container *AddRemoveLoadBalancerHostInput) (*AddRemoveLoadBalancerHostInput, error) { - resp := &AddRemoveLoadBalancerHostInput{} - err := c.rancherClient.doCreate(ADD_REMOVE_LOAD_BALANCER_HOST_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerHostInputClient) Update(existing *AddRemoveLoadBalancerHostInput, updates interface{}) (*AddRemoveLoadBalancerHostInput, error) { - resp := &AddRemoveLoadBalancerHostInput{} - err := c.rancherClient.doUpdate(ADD_REMOVE_LOAD_BALANCER_HOST_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerHostInputClient) List(opts *ListOpts) (*AddRemoveLoadBalancerHostInputCollection, error) { - resp := &AddRemoveLoadBalancerHostInputCollection{} - err := c.rancherClient.doList(ADD_REMOVE_LOAD_BALANCER_HOST_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerHostInputClient) ById(id string) (*AddRemoveLoadBalancerHostInput, error) { - resp := &AddRemoveLoadBalancerHostInput{} - err := c.rancherClient.doById(ADD_REMOVE_LOAD_BALANCER_HOST_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerHostInputClient) Delete(container *AddRemoveLoadBalancerHostInput) error { - return c.rancherClient.doResourceDelete(ADD_REMOVE_LOAD_BALANCER_HOST_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_listener_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_listener_input.go deleted file mode 100644 index 1c341a99..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_listener_input.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - ADD_REMOVE_LOAD_BALANCER_LISTENER_INPUT_TYPE = "addRemoveLoadBalancerListenerInput" -) - -type AddRemoveLoadBalancerListenerInput struct { - Resource - - LoadBalancerListenerId string `json:"loadBalancerListenerId,omitempty"` - -} - -type 
AddRemoveLoadBalancerListenerInputCollection struct { - Collection - Data []AddRemoveLoadBalancerListenerInput `json:"data,omitempty"` -} - -type AddRemoveLoadBalancerListenerInputClient struct { - rancherClient *RancherClient -} - -type AddRemoveLoadBalancerListenerInputOperations interface { - List(opts *ListOpts) (*AddRemoveLoadBalancerListenerInputCollection, error) - Create(opts *AddRemoveLoadBalancerListenerInput) (*AddRemoveLoadBalancerListenerInput, error) - Update(existing *AddRemoveLoadBalancerListenerInput, updates interface{}) (*AddRemoveLoadBalancerListenerInput, error) - ById(id string) (*AddRemoveLoadBalancerListenerInput, error) - Delete(container *AddRemoveLoadBalancerListenerInput) error -} - -func newAddRemoveLoadBalancerListenerInputClient(rancherClient *RancherClient) *AddRemoveLoadBalancerListenerInputClient { - return &AddRemoveLoadBalancerListenerInputClient{ - rancherClient: rancherClient, - } -} - -func (c *AddRemoveLoadBalancerListenerInputClient) Create(container *AddRemoveLoadBalancerListenerInput) (*AddRemoveLoadBalancerListenerInput, error) { - resp := &AddRemoveLoadBalancerListenerInput{} - err := c.rancherClient.doCreate(ADD_REMOVE_LOAD_BALANCER_LISTENER_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerListenerInputClient) Update(existing *AddRemoveLoadBalancerListenerInput, updates interface{}) (*AddRemoveLoadBalancerListenerInput, error) { - resp := &AddRemoveLoadBalancerListenerInput{} - err := c.rancherClient.doUpdate(ADD_REMOVE_LOAD_BALANCER_LISTENER_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerListenerInputClient) List(opts *ListOpts) (*AddRemoveLoadBalancerListenerInputCollection, error) { - resp := &AddRemoveLoadBalancerListenerInputCollection{} - err := c.rancherClient.doList(ADD_REMOVE_LOAD_BALANCER_LISTENER_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerListenerInputClient) ById(id string) (*AddRemoveLoadBalancerListenerInput, error) { - resp := &AddRemoveLoadBalancerListenerInput{} - err := c.rancherClient.doById(ADD_REMOVE_LOAD_BALANCER_LISTENER_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerListenerInputClient) Delete(container *AddRemoveLoadBalancerListenerInput) error { - return c.rancherClient.doResourceDelete(ADD_REMOVE_LOAD_BALANCER_LISTENER_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_target_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_target_input.go deleted file mode 100644 index bfc0974e..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_add_remove_load_balancer_target_input.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - ADD_REMOVE_LOAD_BALANCER_TARGET_INPUT_TYPE = "addRemoveLoadBalancerTargetInput" -) - -type AddRemoveLoadBalancerTargetInput struct { - Resource - - InstanceId string `json:"instanceId,omitempty"` - - IpAddress string `json:"ipAddress,omitempty"` - -} - -type AddRemoveLoadBalancerTargetInputCollection struct { - Collection - Data []AddRemoveLoadBalancerTargetInput `json:"data,omitempty"` -} - -type AddRemoveLoadBalancerTargetInputClient struct { - rancherClient *RancherClient -} - -type AddRemoveLoadBalancerTargetInputOperations interface { - List(opts *ListOpts) (*AddRemoveLoadBalancerTargetInputCollection, error) - Create(opts 
*AddRemoveLoadBalancerTargetInput) (*AddRemoveLoadBalancerTargetInput, error) - Update(existing *AddRemoveLoadBalancerTargetInput, updates interface{}) (*AddRemoveLoadBalancerTargetInput, error) - ById(id string) (*AddRemoveLoadBalancerTargetInput, error) - Delete(container *AddRemoveLoadBalancerTargetInput) error -} - -func newAddRemoveLoadBalancerTargetInputClient(rancherClient *RancherClient) *AddRemoveLoadBalancerTargetInputClient { - return &AddRemoveLoadBalancerTargetInputClient{ - rancherClient: rancherClient, - } -} - -func (c *AddRemoveLoadBalancerTargetInputClient) Create(container *AddRemoveLoadBalancerTargetInput) (*AddRemoveLoadBalancerTargetInput, error) { - resp := &AddRemoveLoadBalancerTargetInput{} - err := c.rancherClient.doCreate(ADD_REMOVE_LOAD_BALANCER_TARGET_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerTargetInputClient) Update(existing *AddRemoveLoadBalancerTargetInput, updates interface{}) (*AddRemoveLoadBalancerTargetInput, error) { - resp := &AddRemoveLoadBalancerTargetInput{} - err := c.rancherClient.doUpdate(ADD_REMOVE_LOAD_BALANCER_TARGET_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerTargetInputClient) List(opts *ListOpts) (*AddRemoveLoadBalancerTargetInputCollection, error) { - resp := &AddRemoveLoadBalancerTargetInputCollection{} - err := c.rancherClient.doList(ADD_REMOVE_LOAD_BALANCER_TARGET_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerTargetInputClient) ById(id string) (*AddRemoveLoadBalancerTargetInput, error) { - resp := &AddRemoveLoadBalancerTargetInput{} - err := c.rancherClient.doById(ADD_REMOVE_LOAD_BALANCER_TARGET_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *AddRemoveLoadBalancerTargetInputClient) Delete(container *AddRemoveLoadBalancerTargetInput) error { - return c.rancherClient.doResourceDelete(ADD_REMOVE_LOAD_BALANCER_TARGET_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_agent.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_agent.go deleted file mode 100644 index 80ef8ce1..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_agent.go +++ /dev/null @@ -1,147 +0,0 @@ -package client - -const ( - AGENT_TYPE = "agent" -) - -type Agent struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - ManagedConfig bool `json:"managedConfig,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uri string `json:"uri,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type AgentCollection struct { - Collection - Data []Agent `json:"data,omitempty"` -} - -type AgentClient struct { - rancherClient *RancherClient -} - -type AgentOperations interface { - List(opts *ListOpts) (*AgentCollection, error) - Create(opts *Agent) (*Agent, error) - Update(existing *Agent, updates interface{}) (*Agent, error) - ById(id string) (*Agent, error) - 
Delete(container *Agent) error - ActionActivate (*Agent) (*Agent, error) - ActionCreate (*Agent) (*Agent, error) - ActionDeactivate (*Agent) (*Agent, error) - ActionPurge (*Agent) (*Agent, error) - ActionReconnect (*Agent) (*Agent, error) - ActionRemove (*Agent) (*Agent, error) - ActionRestore (*Agent) (*Agent, error) - ActionUpdate (*Agent) (*Agent, error) -} - -func newAgentClient(rancherClient *RancherClient) *AgentClient { - return &AgentClient{ - rancherClient: rancherClient, - } -} - -func (c *AgentClient) Create(container *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doCreate(AGENT_TYPE, container, resp) - return resp, err -} - -func (c *AgentClient) Update(existing *Agent, updates interface{}) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doUpdate(AGENT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *AgentClient) List(opts *ListOpts) (*AgentCollection, error) { - resp := &AgentCollection{} - err := c.rancherClient.doList(AGENT_TYPE, opts, resp) - return resp, err -} - -func (c *AgentClient) ById(id string) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doById(AGENT_TYPE, id, resp) - return resp, err -} - -func (c *AgentClient) Delete(container *Agent) error { - return c.rancherClient.doResourceDelete(AGENT_TYPE, &container.Resource) -} - -func (c *AgentClient) ActionActivate(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionCreate(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionDeactivate(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionPurge(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionReconnect(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "reconnect", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionRemove(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionRestore(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *AgentClient) ActionUpdate(resource *Agent) (*Agent, error) { - resp := &Agent{} - err := c.rancherClient.doEmptyAction(AGENT_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_api_key.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_api_key.go deleted file mode 100644 index 5748124f..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_api_key.go +++ /dev/null @@ -1,91 +0,0 @@ -package client - -const ( - API_KEY_TYPE = "apiKey" -) - -type ApiKey struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} 
`json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - PublicValue string `json:"publicValue,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - SecretValue string `json:"secretValue,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type ApiKeyCollection struct { - Collection - Data []ApiKey `json:"data,omitempty"` -} - -type ApiKeyClient struct { - rancherClient *RancherClient -} - -type ApiKeyOperations interface { - List(opts *ListOpts) (*ApiKeyCollection, error) - Create(opts *ApiKey) (*ApiKey, error) - Update(existing *ApiKey, updates interface{}) (*ApiKey, error) - ById(id string) (*ApiKey, error) - Delete(container *ApiKey) error -} - -func newApiKeyClient(rancherClient *RancherClient) *ApiKeyClient { - return &ApiKeyClient{ - rancherClient: rancherClient, - } -} - -func (c *ApiKeyClient) Create(container *ApiKey) (*ApiKey, error) { - resp := &ApiKey{} - err := c.rancherClient.doCreate(API_KEY_TYPE, container, resp) - return resp, err -} - -func (c *ApiKeyClient) Update(existing *ApiKey, updates interface{}) (*ApiKey, error) { - resp := &ApiKey{} - err := c.rancherClient.doUpdate(API_KEY_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ApiKeyClient) List(opts *ListOpts) (*ApiKeyCollection, error) { - resp := &ApiKeyCollection{} - err := c.rancherClient.doList(API_KEY_TYPE, opts, resp) - return resp, err -} - -func (c *ApiKeyClient) ById(id string) (*ApiKey, error) { - resp := &ApiKey{} - err := c.rancherClient.doById(API_KEY_TYPE, id, resp) - return resp, err -} - -func (c *ApiKeyClient) Delete(container *ApiKey) error { - return c.rancherClient.doResourceDelete(API_KEY_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_certificate.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_certificate.go deleted file mode 100644 index cea7163f..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_certificate.go +++ /dev/null @@ -1,87 +0,0 @@ -package client - -const ( - CERTIFICATE_TYPE = "certificate" -) - -type Certificate struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Cert string `json:"cert,omitempty"` - - CertChain string `json:"certChain,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Key string `json:"key,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type CertificateCollection struct { - Collection - Data []Certificate `json:"data,omitempty"` -} - -type CertificateClient struct { - rancherClient *RancherClient -} - -type CertificateOperations interface { - List(opts *ListOpts) (*CertificateCollection, error) - Create(opts *Certificate) (*Certificate, error) - Update(existing *Certificate, updates interface{}) (*Certificate, error) - ById(id string) 
(*Certificate, error) - Delete(container *Certificate) error -} - -func newCertificateClient(rancherClient *RancherClient) *CertificateClient { - return &CertificateClient{ - rancherClient: rancherClient, - } -} - -func (c *CertificateClient) Create(container *Certificate) (*Certificate, error) { - resp := &Certificate{} - err := c.rancherClient.doCreate(CERTIFICATE_TYPE, container, resp) - return resp, err -} - -func (c *CertificateClient) Update(existing *Certificate, updates interface{}) (*Certificate, error) { - resp := &Certificate{} - err := c.rancherClient.doUpdate(CERTIFICATE_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *CertificateClient) List(opts *ListOpts) (*CertificateCollection, error) { - resp := &CertificateCollection{} - err := c.rancherClient.doList(CERTIFICATE_TYPE, opts, resp) - return resp, err -} - -func (c *CertificateClient) ById(id string) (*Certificate, error) { - resp := &Certificate{} - err := c.rancherClient.doById(CERTIFICATE_TYPE, id, resp) - return resp, err -} - -func (c *CertificateClient) Delete(container *Certificate) error { - return c.rancherClient.doResourceDelete(CERTIFICATE_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_client.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_client.go deleted file mode 100644 index ff9d7fb1..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_client.go +++ /dev/null @@ -1,182 +0,0 @@ -package client - -type RancherClient struct { - RancherBaseClient - - Subscribe SubscribeOperations - Publish PublishOperations - RestartPolicy RestartPolicyOperations - LoadBalancerHealthCheck LoadBalancerHealthCheckOperations - LoadBalancerCookieStickinessPolicy LoadBalancerCookieStickinessPolicyOperations - LoadBalancerAppCookieStickinessPolicy LoadBalancerAppCookieStickinessPolicyOperations - GlobalLoadBalancerPolicy GlobalLoadBalancerPolicyOperations - GlobalLoadBalancerHealthCheck GlobalLoadBalancerHealthCheckOperations - Container ContainerOperations - ApiKey ApiKeyOperations - InstanceStop InstanceStopOperations - InstanceConsole InstanceConsoleOperations - InstanceConsoleInput InstanceConsoleInputOperations - IpAddressAssociateInput IpAddressAssociateInputOperations - Project ProjectOperations - AddRemoveLoadBalancerListenerInput AddRemoveLoadBalancerListenerInputOperations - AddRemoveLoadBalancerTargetInput AddRemoveLoadBalancerTargetInputOperations - AddLoadBalancerInput AddLoadBalancerInputOperations - RemoveLoadBalancerInput RemoveLoadBalancerInputOperations - AddRemoveLoadBalancerHostInput AddRemoveLoadBalancerHostInputOperations - SetLoadBalancerListenersInput SetLoadBalancerListenersInputOperations - SetLoadBalancerTargetsInput SetLoadBalancerTargetsInputOperations - SetLoadBalancerHostsInput SetLoadBalancerHostsInputOperations - Cluster ClusterOperations - AddRemoveClusterHostInput AddRemoveClusterHostInputOperations - RegistryCredential RegistryCredentialOperations - Registry RegistryOperations - Account AccountOperations - Agent AgentOperations - Certificate CertificateOperations - ConfigItem ConfigItemOperations - ConfigItemStatus ConfigItemStatusOperations - ContainerEvent ContainerEventOperations - Credential CredentialOperations - Databasechangelog DatabasechangelogOperations - Databasechangeloglock DatabasechangeloglockOperations - ExternalHandler ExternalHandlerOperations - ExternalHandlerExternalHandlerProcessMap 
ExternalHandlerExternalHandlerProcessMapOperations - ExternalHandlerProcess ExternalHandlerProcessOperations - GlobalLoadBalancer GlobalLoadBalancerOperations - Host HostOperations - Image ImageOperations - Instance InstanceOperations - InstanceLink InstanceLinkOperations - IpAddress IpAddressOperations - LoadBalancer LoadBalancerOperations - LoadBalancerConfig LoadBalancerConfigOperations - LoadBalancerListener LoadBalancerListenerOperations - LoadBalancerTarget LoadBalancerTargetOperations - Mount MountOperations - Network NetworkOperations - PhysicalHost PhysicalHostOperations - Port PortOperations - ProcessExecution ProcessExecutionOperations - ProcessInstance ProcessInstanceOperations - Setting SettingOperations - StoragePool StoragePoolOperations - Task TaskOperations - TaskInstance TaskInstanceOperations - Volume VolumeOperations - TypeDocumentation TypeDocumentationOperations - ContainerExec ContainerExecOperations - ContainerLogs ContainerLogsOperations - HostAccess HostAccessOperations - ActiveSetting ActiveSettingOperations - ExtensionImplementation ExtensionImplementationOperations - ExtensionPoint ExtensionPointOperations - ProcessDefinition ProcessDefinitionOperations - ResourceDefinition ResourceDefinitionOperations - Githubconfig GithubconfigOperations - StatsAccess StatsAccessOperations - VirtualboxConfig VirtualboxConfigOperations - DigitaloceanConfig DigitaloceanConfigOperations - Machine MachineOperations - Register RegisterOperations - RegistrationToken RegistrationTokenOperations -} - -func constructClient() *RancherClient { - client := &RancherClient{ - RancherBaseClient: RancherBaseClient{ - Types: map[string]Schema{}, - }, - } - - - client.Subscribe = newSubscribeClient(client) - client.Publish = newPublishClient(client) - client.RestartPolicy = newRestartPolicyClient(client) - client.LoadBalancerHealthCheck = newLoadBalancerHealthCheckClient(client) - client.LoadBalancerCookieStickinessPolicy = newLoadBalancerCookieStickinessPolicyClient(client) - client.LoadBalancerAppCookieStickinessPolicy = newLoadBalancerAppCookieStickinessPolicyClient(client) - client.GlobalLoadBalancerPolicy = newGlobalLoadBalancerPolicyClient(client) - client.GlobalLoadBalancerHealthCheck = newGlobalLoadBalancerHealthCheckClient(client) - client.Container = newContainerClient(client) - client.ApiKey = newApiKeyClient(client) - client.InstanceStop = newInstanceStopClient(client) - client.InstanceConsole = newInstanceConsoleClient(client) - client.InstanceConsoleInput = newInstanceConsoleInputClient(client) - client.IpAddressAssociateInput = newIpAddressAssociateInputClient(client) - client.Project = newProjectClient(client) - client.AddRemoveLoadBalancerListenerInput = newAddRemoveLoadBalancerListenerInputClient(client) - client.AddRemoveLoadBalancerTargetInput = newAddRemoveLoadBalancerTargetInputClient(client) - client.AddLoadBalancerInput = newAddLoadBalancerInputClient(client) - client.RemoveLoadBalancerInput = newRemoveLoadBalancerInputClient(client) - client.AddRemoveLoadBalancerHostInput = newAddRemoveLoadBalancerHostInputClient(client) - client.SetLoadBalancerListenersInput = newSetLoadBalancerListenersInputClient(client) - client.SetLoadBalancerTargetsInput = newSetLoadBalancerTargetsInputClient(client) - client.SetLoadBalancerHostsInput = newSetLoadBalancerHostsInputClient(client) - client.Cluster = newClusterClient(client) - client.AddRemoveClusterHostInput = newAddRemoveClusterHostInputClient(client) - client.RegistryCredential = newRegistryCredentialClient(client) - 
client.Registry = newRegistryClient(client) - client.Account = newAccountClient(client) - client.Agent = newAgentClient(client) - client.Certificate = newCertificateClient(client) - client.ConfigItem = newConfigItemClient(client) - client.ConfigItemStatus = newConfigItemStatusClient(client) - client.ContainerEvent = newContainerEventClient(client) - client.Credential = newCredentialClient(client) - client.Databasechangelog = newDatabasechangelogClient(client) - client.Databasechangeloglock = newDatabasechangeloglockClient(client) - client.ExternalHandler = newExternalHandlerClient(client) - client.ExternalHandlerExternalHandlerProcessMap = newExternalHandlerExternalHandlerProcessMapClient(client) - client.ExternalHandlerProcess = newExternalHandlerProcessClient(client) - client.GlobalLoadBalancer = newGlobalLoadBalancerClient(client) - client.Host = newHostClient(client) - client.Image = newImageClient(client) - client.Instance = newInstanceClient(client) - client.InstanceLink = newInstanceLinkClient(client) - client.IpAddress = newIpAddressClient(client) - client.LoadBalancer = newLoadBalancerClient(client) - client.LoadBalancerConfig = newLoadBalancerConfigClient(client) - client.LoadBalancerListener = newLoadBalancerListenerClient(client) - client.LoadBalancerTarget = newLoadBalancerTargetClient(client) - client.Mount = newMountClient(client) - client.Network = newNetworkClient(client) - client.PhysicalHost = newPhysicalHostClient(client) - client.Port = newPortClient(client) - client.ProcessExecution = newProcessExecutionClient(client) - client.ProcessInstance = newProcessInstanceClient(client) - client.Setting = newSettingClient(client) - client.StoragePool = newStoragePoolClient(client) - client.Task = newTaskClient(client) - client.TaskInstance = newTaskInstanceClient(client) - client.Volume = newVolumeClient(client) - client.TypeDocumentation = newTypeDocumentationClient(client) - client.ContainerExec = newContainerExecClient(client) - client.ContainerLogs = newContainerLogsClient(client) - client.HostAccess = newHostAccessClient(client) - client.ActiveSetting = newActiveSettingClient(client) - client.ExtensionImplementation = newExtensionImplementationClient(client) - client.ExtensionPoint = newExtensionPointClient(client) - client.ProcessDefinition = newProcessDefinitionClient(client) - client.ResourceDefinition = newResourceDefinitionClient(client) - client.Githubconfig = newGithubconfigClient(client) - client.StatsAccess = newStatsAccessClient(client) - client.VirtualboxConfig = newVirtualboxConfigClient(client) - client.DigitaloceanConfig = newDigitaloceanConfigClient(client) - client.Machine = newMachineClient(client) - client.Register = newRegisterClient(client) - client.RegistrationToken = newRegistrationTokenClient(client) - - - return client -} - -func NewRancherClient(opts *ClientOpts) (*RancherClient, error) { - client := constructClient() - - err := setupRancherBaseClient(&client.RancherBaseClient, opts) - if err != nil { - return nil, err - } - - return client, nil -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_cluster.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_cluster.go deleted file mode 100644 index cdf8bb87..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_cluster.go +++ /dev/null @@ -1,103 +0,0 @@ -package client - -const ( - CLUSTER_TYPE = "cluster" -) - -type Cluster struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - AgentId 
string `json:"agentId,omitempty"` - - ApiProxy string `json:"apiProxy,omitempty"` - - CertificateId string `json:"certificateId,omitempty"` - - ComputeTotal int `json:"computeTotal,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - DiscoverySpec string `json:"discoverySpec,omitempty"` - - Info interface{} `json:"info,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - PhysicalHostId string `json:"physicalHostId,omitempty"` - - Port int `json:"port,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type ClusterCollection struct { - Collection - Data []Cluster `json:"data,omitempty"` -} - -type ClusterClient struct { - rancherClient *RancherClient -} - -type ClusterOperations interface { - List(opts *ListOpts) (*ClusterCollection, error) - Create(opts *Cluster) (*Cluster, error) - Update(existing *Cluster, updates interface{}) (*Cluster, error) - ById(id string) (*Cluster, error) - Delete(container *Cluster) error -} - -func newClusterClient(rancherClient *RancherClient) *ClusterClient { - return &ClusterClient{ - rancherClient: rancherClient, - } -} - -func (c *ClusterClient) Create(container *Cluster) (*Cluster, error) { - resp := &Cluster{} - err := c.rancherClient.doCreate(CLUSTER_TYPE, container, resp) - return resp, err -} - -func (c *ClusterClient) Update(existing *Cluster, updates interface{}) (*Cluster, error) { - resp := &Cluster{} - err := c.rancherClient.doUpdate(CLUSTER_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ClusterClient) List(opts *ListOpts) (*ClusterCollection, error) { - resp := &ClusterCollection{} - err := c.rancherClient.doList(CLUSTER_TYPE, opts, resp) - return resp, err -} - -func (c *ClusterClient) ById(id string) (*Cluster, error) { - resp := &Cluster{} - err := c.rancherClient.doById(CLUSTER_TYPE, id, resp) - return resp, err -} - -func (c *ClusterClient) Delete(container *Cluster) error { - return c.rancherClient.doResourceDelete(CLUSTER_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item.go deleted file mode 100644 index 194537ce..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - CONFIG_ITEM_TYPE = "configItem" -) - -type ConfigItem struct { - Resource - - Name string `json:"name,omitempty"` - - SourceVersion string `json:"sourceVersion,omitempty"` - -} - -type ConfigItemCollection struct { - Collection - Data []ConfigItem `json:"data,omitempty"` -} - -type ConfigItemClient struct { - rancherClient *RancherClient -} - -type ConfigItemOperations interface { - List(opts *ListOpts) (*ConfigItemCollection, error) - Create(opts *ConfigItem) (*ConfigItem, error) - Update(existing *ConfigItem, updates interface{}) (*ConfigItem, error) - ById(id string) (*ConfigItem, error) - Delete(container *ConfigItem) error -} - -func newConfigItemClient(rancherClient 
*RancherClient) *ConfigItemClient { - return &ConfigItemClient{ - rancherClient: rancherClient, - } -} - -func (c *ConfigItemClient) Create(container *ConfigItem) (*ConfigItem, error) { - resp := &ConfigItem{} - err := c.rancherClient.doCreate(CONFIG_ITEM_TYPE, container, resp) - return resp, err -} - -func (c *ConfigItemClient) Update(existing *ConfigItem, updates interface{}) (*ConfigItem, error) { - resp := &ConfigItem{} - err := c.rancherClient.doUpdate(CONFIG_ITEM_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ConfigItemClient) List(opts *ListOpts) (*ConfigItemCollection, error) { - resp := &ConfigItemCollection{} - err := c.rancherClient.doList(CONFIG_ITEM_TYPE, opts, resp) - return resp, err -} - -func (c *ConfigItemClient) ById(id string) (*ConfigItem, error) { - resp := &ConfigItem{} - err := c.rancherClient.doById(CONFIG_ITEM_TYPE, id, resp) - return resp, err -} - -func (c *ConfigItemClient) Delete(container *ConfigItem) error { - return c.rancherClient.doResourceDelete(CONFIG_ITEM_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item_status.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item_status.go deleted file mode 100644 index 8551fb5c..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_config_item_status.go +++ /dev/null @@ -1,75 +0,0 @@ -package client - -const ( - CONFIG_ITEM_STATUS_TYPE = "configItemStatus" -) - -type ConfigItemStatus struct { - Resource - - AgentId string `json:"agentId,omitempty"` - - AppliedUpdated string `json:"appliedUpdated,omitempty"` - - AppliedVersion int `json:"appliedVersion,omitempty"` - - Name string `json:"name,omitempty"` - - RequestedUpdated string `json:"requestedUpdated,omitempty"` - - RequestedVersion int `json:"requestedVersion,omitempty"` - - SourceVersion string `json:"sourceVersion,omitempty"` - -} - -type ConfigItemStatusCollection struct { - Collection - Data []ConfigItemStatus `json:"data,omitempty"` -} - -type ConfigItemStatusClient struct { - rancherClient *RancherClient -} - -type ConfigItemStatusOperations interface { - List(opts *ListOpts) (*ConfigItemStatusCollection, error) - Create(opts *ConfigItemStatus) (*ConfigItemStatus, error) - Update(existing *ConfigItemStatus, updates interface{}) (*ConfigItemStatus, error) - ById(id string) (*ConfigItemStatus, error) - Delete(container *ConfigItemStatus) error -} - -func newConfigItemStatusClient(rancherClient *RancherClient) *ConfigItemStatusClient { - return &ConfigItemStatusClient{ - rancherClient: rancherClient, - } -} - -func (c *ConfigItemStatusClient) Create(container *ConfigItemStatus) (*ConfigItemStatus, error) { - resp := &ConfigItemStatus{} - err := c.rancherClient.doCreate(CONFIG_ITEM_STATUS_TYPE, container, resp) - return resp, err -} - -func (c *ConfigItemStatusClient) Update(existing *ConfigItemStatus, updates interface{}) (*ConfigItemStatus, error) { - resp := &ConfigItemStatus{} - err := c.rancherClient.doUpdate(CONFIG_ITEM_STATUS_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ConfigItemStatusClient) List(opts *ListOpts) (*ConfigItemStatusCollection, error) { - resp := &ConfigItemStatusCollection{} - err := c.rancherClient.doList(CONFIG_ITEM_STATUS_TYPE, opts, resp) - return resp, err -} - -func (c *ConfigItemStatusClient) ById(id string) (*ConfigItemStatus, error) { - resp := &ConfigItemStatus{} - err := c.rancherClient.doById(CONFIG_ITEM_STATUS_TYPE, id, resp) - 
return resp, err -} - -func (c *ConfigItemStatusClient) Delete(container *ConfigItemStatus) error { - return c.rancherClient.doResourceDelete(CONFIG_ITEM_STATUS_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container.go deleted file mode 100644 index 029a6d54..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container.go +++ /dev/null @@ -1,165 +0,0 @@ -package client - -const ( - CONTAINER_TYPE = "container" -) - -type Container struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - AgentId string `json:"agentId,omitempty"` - - AllocationState string `json:"allocationState,omitempty"` - - CapAdd []string `json:"capAdd,omitempty"` - - CapDrop []string `json:"capDrop,omitempty"` - - Command string `json:"command,omitempty"` - - CommandArgs []string `json:"commandArgs,omitempty"` - - Count int `json:"count,omitempty"` - - CpuSet string `json:"cpuSet,omitempty"` - - CpuShares int `json:"cpuShares,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - DataVolumes []string `json:"dataVolumes,omitempty"` - - DataVolumesFrom []string `json:"dataVolumesFrom,omitempty"` - - Description string `json:"description,omitempty"` - - Devices []string `json:"devices,omitempty"` - - Directory string `json:"directory,omitempty"` - - Dns []string `json:"dns,omitempty"` - - DnsSearch []string `json:"dnsSearch,omitempty"` - - DomainName string `json:"domainName,omitempty"` - - EntryPoint []string `json:"entryPoint,omitempty"` - - Environment map[string]interface{} `json:"environment,omitempty"` - - FirstRunning string `json:"firstRunning,omitempty"` - - Hostname string `json:"hostname,omitempty"` - - ImageUuid string `json:"imageUuid,omitempty"` - - InstanceLinks map[string]interface{} `json:"instanceLinks,omitempty"` - - Kind string `json:"kind,omitempty"` - - LxcConf map[string]interface{} `json:"lxcConf,omitempty"` - - Memory int `json:"memory,omitempty"` - - MemorySwap int `json:"memorySwap,omitempty"` - - Name string `json:"name,omitempty"` - - NetworkIds []string `json:"networkIds,omitempty"` - - Ports []string `json:"ports,omitempty"` - - PrimaryIpAddress string `json:"primaryIpAddress,omitempty"` - - Privileged bool `json:"privileged,omitempty"` - - PublishAllPorts bool `json:"publishAllPorts,omitempty"` - - RegistryCredentialId string `json:"registryCredentialId,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - RequestedHostId string `json:"requestedHostId,omitempty"` - - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` - - StartOnCreate bool `json:"startOnCreate,omitempty"` - - State string `json:"state,omitempty"` - - StdinOpen bool `json:"stdinOpen,omitempty"` - - SystemContainer string `json:"systemContainer,omitempty"` - - Token string `json:"token,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Tty bool `json:"tty,omitempty"` - - User string `json:"user,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type ContainerCollection struct { - Collection - Data []Container `json:"data,omitempty"` -} - -type ContainerClient struct { - rancherClient *RancherClient -} - -type 
ContainerOperations interface { - List(opts *ListOpts) (*ContainerCollection, error) - Create(opts *Container) (*Container, error) - Update(existing *Container, updates interface{}) (*Container, error) - ById(id string) (*Container, error) - Delete(container *Container) error -} - -func newContainerClient(rancherClient *RancherClient) *ContainerClient { - return &ContainerClient{ - rancherClient: rancherClient, - } -} - -func (c *ContainerClient) Create(container *Container) (*Container, error) { - resp := &Container{} - err := c.rancherClient.doCreate(CONTAINER_TYPE, container, resp) - return resp, err -} - -func (c *ContainerClient) Update(existing *Container, updates interface{}) (*Container, error) { - resp := &Container{} - err := c.rancherClient.doUpdate(CONTAINER_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ContainerClient) List(opts *ListOpts) (*ContainerCollection, error) { - resp := &ContainerCollection{} - err := c.rancherClient.doList(CONTAINER_TYPE, opts, resp) - return resp, err -} - -func (c *ContainerClient) ById(id string) (*Container, error) { - resp := &Container{} - err := c.rancherClient.doById(CONTAINER_TYPE, id, resp) - return resp, err -} - -func (c *ContainerClient) Delete(container *Container) error { - return c.rancherClient.doResourceDelete(CONTAINER_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_event.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_event.go deleted file mode 100644 index e430e52b..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_event.go +++ /dev/null @@ -1,105 +0,0 @@ -package client - -const ( - CONTAINER_EVENT_TYPE = "containerEvent" -) - -type ContainerEvent struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - DockerInspect interface{} `json:"dockerInspect,omitempty"` - - ExternalFrom string `json:"externalFrom,omitempty"` - - ExternalId string `json:"externalId,omitempty"` - - ExternalStatus string `json:"externalStatus,omitempty"` - - ExternalTimestamp int `json:"externalTimestamp,omitempty"` - - HostId string `json:"hostId,omitempty"` - - Kind string `json:"kind,omitempty"` - - ReportedHostUuid string `json:"reportedHostUuid,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - -} - -type ContainerEventCollection struct { - Collection - Data []ContainerEvent `json:"data,omitempty"` -} - -type ContainerEventClient struct { - rancherClient *RancherClient -} - -type ContainerEventOperations interface { - List(opts *ListOpts) (*ContainerEventCollection, error) - Create(opts *ContainerEvent) (*ContainerEvent, error) - Update(existing *ContainerEvent, updates interface{}) (*ContainerEvent, error) - ById(id string) (*ContainerEvent, error) - Delete(container *ContainerEvent) error - ActionCreate (*ContainerEvent) (*ContainerEvent, error) - ActionRemove (*ContainerEvent) (*ContainerEvent, error) -} - -func newContainerEventClient(rancherClient *RancherClient) *ContainerEventClient { - return &ContainerEventClient{ - rancherClient: rancherClient, - } -} - -func (c *ContainerEventClient) Create(container *ContainerEvent) (*ContainerEvent, 
error) { - resp := &ContainerEvent{} - err := c.rancherClient.doCreate(CONTAINER_EVENT_TYPE, container, resp) - return resp, err -} - -func (c *ContainerEventClient) Update(existing *ContainerEvent, updates interface{}) (*ContainerEvent, error) { - resp := &ContainerEvent{} - err := c.rancherClient.doUpdate(CONTAINER_EVENT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ContainerEventClient) List(opts *ListOpts) (*ContainerEventCollection, error) { - resp := &ContainerEventCollection{} - err := c.rancherClient.doList(CONTAINER_EVENT_TYPE, opts, resp) - return resp, err -} - -func (c *ContainerEventClient) ById(id string) (*ContainerEvent, error) { - resp := &ContainerEvent{} - err := c.rancherClient.doById(CONTAINER_EVENT_TYPE, id, resp) - return resp, err -} - -func (c *ContainerEventClient) Delete(container *ContainerEvent) error { - return c.rancherClient.doResourceDelete(CONTAINER_EVENT_TYPE, &container.Resource) -} - -func (c *ContainerEventClient) ActionCreate(resource *ContainerEvent) (*ContainerEvent, error) { - resp := &ContainerEvent{} - err := c.rancherClient.doEmptyAction(CONTAINER_EVENT_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *ContainerEventClient) ActionRemove(resource *ContainerEvent) (*ContainerEvent, error) { - resp := &ContainerEvent{} - err := c.rancherClient.doEmptyAction(CONTAINER_EVENT_TYPE, "remove", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_exec.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_exec.go deleted file mode 100644 index 077a756a..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_exec.go +++ /dev/null @@ -1,69 +0,0 @@ -package client - -const ( - CONTAINER_EXEC_TYPE = "containerExec" -) - -type ContainerExec struct { - Resource - - AttachStdin bool `json:"attachStdin,omitempty"` - - AttachStdout bool `json:"attachStdout,omitempty"` - - Command []string `json:"command,omitempty"` - - Tty bool `json:"tty,omitempty"` - -} - -type ContainerExecCollection struct { - Collection - Data []ContainerExec `json:"data,omitempty"` -} - -type ContainerExecClient struct { - rancherClient *RancherClient -} - -type ContainerExecOperations interface { - List(opts *ListOpts) (*ContainerExecCollection, error) - Create(opts *ContainerExec) (*ContainerExec, error) - Update(existing *ContainerExec, updates interface{}) (*ContainerExec, error) - ById(id string) (*ContainerExec, error) - Delete(container *ContainerExec) error -} - -func newContainerExecClient(rancherClient *RancherClient) *ContainerExecClient { - return &ContainerExecClient{ - rancherClient: rancherClient, - } -} - -func (c *ContainerExecClient) Create(container *ContainerExec) (*ContainerExec, error) { - resp := &ContainerExec{} - err := c.rancherClient.doCreate(CONTAINER_EXEC_TYPE, container, resp) - return resp, err -} - -func (c *ContainerExecClient) Update(existing *ContainerExec, updates interface{}) (*ContainerExec, error) { - resp := &ContainerExec{} - err := c.rancherClient.doUpdate(CONTAINER_EXEC_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ContainerExecClient) List(opts *ListOpts) (*ContainerExecCollection, error) { - resp := &ContainerExecCollection{} - err := c.rancherClient.doList(CONTAINER_EXEC_TYPE, opts, resp) - return resp, err -} - -func (c *ContainerExecClient) ById(id string) (*ContainerExec, error) { - resp := 
&ContainerExec{} - err := c.rancherClient.doById(CONTAINER_EXEC_TYPE, id, resp) - return resp, err -} - -func (c *ContainerExecClient) Delete(container *ContainerExec) error { - return c.rancherClient.doResourceDelete(CONTAINER_EXEC_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_logs.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_logs.go deleted file mode 100644 index 1355e57d..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_container_logs.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - CONTAINER_LOGS_TYPE = "containerLogs" -) - -type ContainerLogs struct { - Resource - - Follow bool `json:"follow,omitempty"` - - Lines int `json:"lines,omitempty"` - -} - -type ContainerLogsCollection struct { - Collection - Data []ContainerLogs `json:"data,omitempty"` -} - -type ContainerLogsClient struct { - rancherClient *RancherClient -} - -type ContainerLogsOperations interface { - List(opts *ListOpts) (*ContainerLogsCollection, error) - Create(opts *ContainerLogs) (*ContainerLogs, error) - Update(existing *ContainerLogs, updates interface{}) (*ContainerLogs, error) - ById(id string) (*ContainerLogs, error) - Delete(container *ContainerLogs) error -} - -func newContainerLogsClient(rancherClient *RancherClient) *ContainerLogsClient { - return &ContainerLogsClient{ - rancherClient: rancherClient, - } -} - -func (c *ContainerLogsClient) Create(container *ContainerLogs) (*ContainerLogs, error) { - resp := &ContainerLogs{} - err := c.rancherClient.doCreate(CONTAINER_LOGS_TYPE, container, resp) - return resp, err -} - -func (c *ContainerLogsClient) Update(existing *ContainerLogs, updates interface{}) (*ContainerLogs, error) { - resp := &ContainerLogs{} - err := c.rancherClient.doUpdate(CONTAINER_LOGS_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ContainerLogsClient) List(opts *ListOpts) (*ContainerLogsCollection, error) { - resp := &ContainerLogsCollection{} - err := c.rancherClient.doList(CONTAINER_LOGS_TYPE, opts, resp) - return resp, err -} - -func (c *ContainerLogsClient) ById(id string) (*ContainerLogs, error) { - resp := &ContainerLogs{} - err := c.rancherClient.doById(CONTAINER_LOGS_TYPE, id, resp) - return resp, err -} - -func (c *ContainerLogsClient) Delete(container *ContainerLogs) error { - return c.rancherClient.doResourceDelete(CONTAINER_LOGS_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_credential.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_credential.go deleted file mode 100644 index e03b169a..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_credential.go +++ /dev/null @@ -1,140 +0,0 @@ -package client - -const ( - CREDENTIAL_TYPE = "credential" -) - -type Credential struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - PublicValue string `json:"publicValue,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - SecretValue string `json:"secretValue,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string 
`json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type CredentialCollection struct { - Collection - Data []Credential `json:"data,omitempty"` -} - -type CredentialClient struct { - rancherClient *RancherClient -} - -type CredentialOperations interface { - List(opts *ListOpts) (*CredentialCollection, error) - Create(opts *Credential) (*Credential, error) - Update(existing *Credential, updates interface{}) (*Credential, error) - ById(id string) (*Credential, error) - Delete(container *Credential) error - ActionActivate (*Credential) (*Credential, error) - ActionCreate (*Credential) (*Credential, error) - ActionDeactivate (*Credential) (*Credential, error) - ActionPurge (*Credential) (*Credential, error) - ActionRemove (*Credential) (*Credential, error) - ActionRestore (*Credential) (*Credential, error) - ActionUpdate (*Credential) (*Credential, error) -} - -func newCredentialClient(rancherClient *RancherClient) *CredentialClient { - return &CredentialClient{ - rancherClient: rancherClient, - } -} - -func (c *CredentialClient) Create(container *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doCreate(CREDENTIAL_TYPE, container, resp) - return resp, err -} - -func (c *CredentialClient) Update(existing *Credential, updates interface{}) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doUpdate(CREDENTIAL_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *CredentialClient) List(opts *ListOpts) (*CredentialCollection, error) { - resp := &CredentialCollection{} - err := c.rancherClient.doList(CREDENTIAL_TYPE, opts, resp) - return resp, err -} - -func (c *CredentialClient) ById(id string) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doById(CREDENTIAL_TYPE, id, resp) - return resp, err -} - -func (c *CredentialClient) Delete(container *Credential) error { - return c.rancherClient.doResourceDelete(CREDENTIAL_TYPE, &container.Resource) -} - -func (c *CredentialClient) ActionActivate(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *CredentialClient) ActionCreate(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *CredentialClient) ActionDeactivate(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *CredentialClient) ActionPurge(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *CredentialClient) ActionRemove(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *CredentialClient) ActionRestore(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *CredentialClient) 
ActionUpdate(resource *Credential) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(CREDENTIAL_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangelog.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangelog.go deleted file mode 100644 index ea361032..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangelog.go +++ /dev/null @@ -1,81 +0,0 @@ -package client - -const ( - DATABASECHANGELOG_TYPE = "databasechangelog" -) - -type Databasechangelog struct { - Resource - - Author string `json:"author,omitempty"` - - Comments string `json:"comments,omitempty"` - - Dateexecuted string `json:"dateexecuted,omitempty"` - - Description string `json:"description,omitempty"` - - Exectype string `json:"exectype,omitempty"` - - Filename string `json:"filename,omitempty"` - - Liquibase string `json:"liquibase,omitempty"` - - Md5sum string `json:"md5sum,omitempty"` - - Orderexecuted int `json:"orderexecuted,omitempty"` - - Tag string `json:"tag,omitempty"` - -} - -type DatabasechangelogCollection struct { - Collection - Data []Databasechangelog `json:"data,omitempty"` -} - -type DatabasechangelogClient struct { - rancherClient *RancherClient -} - -type DatabasechangelogOperations interface { - List(opts *ListOpts) (*DatabasechangelogCollection, error) - Create(opts *Databasechangelog) (*Databasechangelog, error) - Update(existing *Databasechangelog, updates interface{}) (*Databasechangelog, error) - ById(id string) (*Databasechangelog, error) - Delete(container *Databasechangelog) error -} - -func newDatabasechangelogClient(rancherClient *RancherClient) *DatabasechangelogClient { - return &DatabasechangelogClient{ - rancherClient: rancherClient, - } -} - -func (c *DatabasechangelogClient) Create(container *Databasechangelog) (*Databasechangelog, error) { - resp := &Databasechangelog{} - err := c.rancherClient.doCreate(DATABASECHANGELOG_TYPE, container, resp) - return resp, err -} - -func (c *DatabasechangelogClient) Update(existing *Databasechangelog, updates interface{}) (*Databasechangelog, error) { - resp := &Databasechangelog{} - err := c.rancherClient.doUpdate(DATABASECHANGELOG_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *DatabasechangelogClient) List(opts *ListOpts) (*DatabasechangelogCollection, error) { - resp := &DatabasechangelogCollection{} - err := c.rancherClient.doList(DATABASECHANGELOG_TYPE, opts, resp) - return resp, err -} - -func (c *DatabasechangelogClient) ById(id string) (*Databasechangelog, error) { - resp := &Databasechangelog{} - err := c.rancherClient.doById(DATABASECHANGELOG_TYPE, id, resp) - return resp, err -} - -func (c *DatabasechangelogClient) Delete(container *Databasechangelog) error { - return c.rancherClient.doResourceDelete(DATABASECHANGELOG_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangeloglock.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangeloglock.go deleted file mode 100644 index 1b6ba9a2..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_databasechangeloglock.go +++ /dev/null @@ -1,67 +0,0 @@ -package client - -const ( - DATABASECHANGELOGLOCK_TYPE = "databasechangeloglock" -) - -type Databasechangeloglock struct { - Resource - - Locked bool 
`json:"locked,omitempty"` - - Lockedby string `json:"lockedby,omitempty"` - - Lockgranted string `json:"lockgranted,omitempty"` - -} - -type DatabasechangeloglockCollection struct { - Collection - Data []Databasechangeloglock `json:"data,omitempty"` -} - -type DatabasechangeloglockClient struct { - rancherClient *RancherClient -} - -type DatabasechangeloglockOperations interface { - List(opts *ListOpts) (*DatabasechangeloglockCollection, error) - Create(opts *Databasechangeloglock) (*Databasechangeloglock, error) - Update(existing *Databasechangeloglock, updates interface{}) (*Databasechangeloglock, error) - ById(id string) (*Databasechangeloglock, error) - Delete(container *Databasechangeloglock) error -} - -func newDatabasechangeloglockClient(rancherClient *RancherClient) *DatabasechangeloglockClient { - return &DatabasechangeloglockClient{ - rancherClient: rancherClient, - } -} - -func (c *DatabasechangeloglockClient) Create(container *Databasechangeloglock) (*Databasechangeloglock, error) { - resp := &Databasechangeloglock{} - err := c.rancherClient.doCreate(DATABASECHANGELOGLOCK_TYPE, container, resp) - return resp, err -} - -func (c *DatabasechangeloglockClient) Update(existing *Databasechangeloglock, updates interface{}) (*Databasechangeloglock, error) { - resp := &Databasechangeloglock{} - err := c.rancherClient.doUpdate(DATABASECHANGELOGLOCK_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *DatabasechangeloglockClient) List(opts *ListOpts) (*DatabasechangeloglockCollection, error) { - resp := &DatabasechangeloglockCollection{} - err := c.rancherClient.doList(DATABASECHANGELOGLOCK_TYPE, opts, resp) - return resp, err -} - -func (c *DatabasechangeloglockClient) ById(id string) (*Databasechangeloglock, error) { - resp := &Databasechangeloglock{} - err := c.rancherClient.doById(DATABASECHANGELOGLOCK_TYPE, id, resp) - return resp, err -} - -func (c *DatabasechangeloglockClient) Delete(container *Databasechangeloglock) error { - return c.rancherClient.doResourceDelete(DATABASECHANGELOGLOCK_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_digitalocean_config.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_digitalocean_config.go deleted file mode 100644 index cf68c610..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_digitalocean_config.go +++ /dev/null @@ -1,69 +0,0 @@ -package client - -const ( - DIGITALOCEAN_CONFIG_TYPE = "digitaloceanConfig" -) - -type DigitaloceanConfig struct { - Resource - - AccessToken string `json:"accessToken,omitempty"` - - Image string `json:"image,omitempty"` - - Region string `json:"region,omitempty"` - - Size string `json:"size,omitempty"` - -} - -type DigitaloceanConfigCollection struct { - Collection - Data []DigitaloceanConfig `json:"data,omitempty"` -} - -type DigitaloceanConfigClient struct { - rancherClient *RancherClient -} - -type DigitaloceanConfigOperations interface { - List(opts *ListOpts) (*DigitaloceanConfigCollection, error) - Create(opts *DigitaloceanConfig) (*DigitaloceanConfig, error) - Update(existing *DigitaloceanConfig, updates interface{}) (*DigitaloceanConfig, error) - ById(id string) (*DigitaloceanConfig, error) - Delete(container *DigitaloceanConfig) error -} - -func newDigitaloceanConfigClient(rancherClient *RancherClient) *DigitaloceanConfigClient { - return &DigitaloceanConfigClient{ - rancherClient: rancherClient, - } -} - -func (c *DigitaloceanConfigClient) Create(container 
*DigitaloceanConfig) (*DigitaloceanConfig, error) { - resp := &DigitaloceanConfig{} - err := c.rancherClient.doCreate(DIGITALOCEAN_CONFIG_TYPE, container, resp) - return resp, err -} - -func (c *DigitaloceanConfigClient) Update(existing *DigitaloceanConfig, updates interface{}) (*DigitaloceanConfig, error) { - resp := &DigitaloceanConfig{} - err := c.rancherClient.doUpdate(DIGITALOCEAN_CONFIG_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *DigitaloceanConfigClient) List(opts *ListOpts) (*DigitaloceanConfigCollection, error) { - resp := &DigitaloceanConfigCollection{} - err := c.rancherClient.doList(DIGITALOCEAN_CONFIG_TYPE, opts, resp) - return resp, err -} - -func (c *DigitaloceanConfigClient) ById(id string) (*DigitaloceanConfig, error) { - resp := &DigitaloceanConfig{} - err := c.rancherClient.doById(DIGITALOCEAN_CONFIG_TYPE, id, resp) - return resp, err -} - -func (c *DigitaloceanConfigClient) Delete(container *DigitaloceanConfig) error { - return c.rancherClient.doResourceDelete(DIGITALOCEAN_CONFIG_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_implementation.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_implementation.go deleted file mode 100644 index 9e37e120..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_implementation.go +++ /dev/null @@ -1,67 +0,0 @@ -package client - -const ( - EXTENSION_IMPLEMENTATION_TYPE = "extensionImplementation" -) - -type ExtensionImplementation struct { - Resource - - ClassName string `json:"className,omitempty"` - - Name string `json:"name,omitempty"` - - Properties map[string]interface{} `json:"properties,omitempty"` - -} - -type ExtensionImplementationCollection struct { - Collection - Data []ExtensionImplementation `json:"data,omitempty"` -} - -type ExtensionImplementationClient struct { - rancherClient *RancherClient -} - -type ExtensionImplementationOperations interface { - List(opts *ListOpts) (*ExtensionImplementationCollection, error) - Create(opts *ExtensionImplementation) (*ExtensionImplementation, error) - Update(existing *ExtensionImplementation, updates interface{}) (*ExtensionImplementation, error) - ById(id string) (*ExtensionImplementation, error) - Delete(container *ExtensionImplementation) error -} - -func newExtensionImplementationClient(rancherClient *RancherClient) *ExtensionImplementationClient { - return &ExtensionImplementationClient{ - rancherClient: rancherClient, - } -} - -func (c *ExtensionImplementationClient) Create(container *ExtensionImplementation) (*ExtensionImplementation, error) { - resp := &ExtensionImplementation{} - err := c.rancherClient.doCreate(EXTENSION_IMPLEMENTATION_TYPE, container, resp) - return resp, err -} - -func (c *ExtensionImplementationClient) Update(existing *ExtensionImplementation, updates interface{}) (*ExtensionImplementation, error) { - resp := &ExtensionImplementation{} - err := c.rancherClient.doUpdate(EXTENSION_IMPLEMENTATION_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ExtensionImplementationClient) List(opts *ListOpts) (*ExtensionImplementationCollection, error) { - resp := &ExtensionImplementationCollection{} - err := c.rancherClient.doList(EXTENSION_IMPLEMENTATION_TYPE, opts, resp) - return resp, err -} - -func (c *ExtensionImplementationClient) ById(id string) (*ExtensionImplementation, error) { - resp := &ExtensionImplementation{} - err := 
c.rancherClient.doById(EXTENSION_IMPLEMENTATION_TYPE, id, resp) - return resp, err -} - -func (c *ExtensionImplementationClient) Delete(container *ExtensionImplementation) error { - return c.rancherClient.doResourceDelete(EXTENSION_IMPLEMENTATION_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_point.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_point.go deleted file mode 100644 index 284aa976..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_extension_point.go +++ /dev/null @@ -1,71 +0,0 @@ -package client - -const ( - EXTENSION_POINT_TYPE = "extensionPoint" -) - -type ExtensionPoint struct { - Resource - - ExcludeSetting string `json:"excludeSetting,omitempty"` - - Implementations []interface{} `json:"implementations,omitempty"` - - IncludeSetting string `json:"includeSetting,omitempty"` - - ListSetting string `json:"listSetting,omitempty"` - - Name string `json:"name,omitempty"` - -} - -type ExtensionPointCollection struct { - Collection - Data []ExtensionPoint `json:"data,omitempty"` -} - -type ExtensionPointClient struct { - rancherClient *RancherClient -} - -type ExtensionPointOperations interface { - List(opts *ListOpts) (*ExtensionPointCollection, error) - Create(opts *ExtensionPoint) (*ExtensionPoint, error) - Update(existing *ExtensionPoint, updates interface{}) (*ExtensionPoint, error) - ById(id string) (*ExtensionPoint, error) - Delete(container *ExtensionPoint) error -} - -func newExtensionPointClient(rancherClient *RancherClient) *ExtensionPointClient { - return &ExtensionPointClient{ - rancherClient: rancherClient, - } -} - -func (c *ExtensionPointClient) Create(container *ExtensionPoint) (*ExtensionPoint, error) { - resp := &ExtensionPoint{} - err := c.rancherClient.doCreate(EXTENSION_POINT_TYPE, container, resp) - return resp, err -} - -func (c *ExtensionPointClient) Update(existing *ExtensionPoint, updates interface{}) (*ExtensionPoint, error) { - resp := &ExtensionPoint{} - err := c.rancherClient.doUpdate(EXTENSION_POINT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ExtensionPointClient) List(opts *ListOpts) (*ExtensionPointCollection, error) { - resp := &ExtensionPointCollection{} - err := c.rancherClient.doList(EXTENSION_POINT_TYPE, opts, resp) - return resp, err -} - -func (c *ExtensionPointClient) ById(id string) (*ExtensionPoint, error) { - resp := &ExtensionPoint{} - err := c.rancherClient.doById(EXTENSION_POINT_TYPE, id, resp) - return resp, err -} - -func (c *ExtensionPointClient) Delete(container *ExtensionPoint) error { - return c.rancherClient.doResourceDelete(EXTENSION_POINT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler.go deleted file mode 100644 index e3982427..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler.go +++ /dev/null @@ -1,142 +0,0 @@ -package client - -const ( - EXTERNAL_HANDLER_TYPE = "externalHandler" -) - -type ExternalHandler struct { - Resource - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - Priority int `json:"priority,omitempty"` - - ProcessNames 
[]string `json:"processNames,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - Retries int `json:"retries,omitempty"` - - State string `json:"state,omitempty"` - - TimeoutMillis int `json:"timeoutMillis,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type ExternalHandlerCollection struct { - Collection - Data []ExternalHandler `json:"data,omitempty"` -} - -type ExternalHandlerClient struct { - rancherClient *RancherClient -} - -type ExternalHandlerOperations interface { - List(opts *ListOpts) (*ExternalHandlerCollection, error) - Create(opts *ExternalHandler) (*ExternalHandler, error) - Update(existing *ExternalHandler, updates interface{}) (*ExternalHandler, error) - ById(id string) (*ExternalHandler, error) - Delete(container *ExternalHandler) error - ActionActivate (*ExternalHandler) (*ExternalHandler, error) - ActionCreate (*ExternalHandler) (*ExternalHandler, error) - ActionDeactivate (*ExternalHandler) (*ExternalHandler, error) - ActionPurge (*ExternalHandler) (*ExternalHandler, error) - ActionRemove (*ExternalHandler) (*ExternalHandler, error) - ActionRestore (*ExternalHandler) (*ExternalHandler, error) - ActionUpdate (*ExternalHandler) (*ExternalHandler, error) -} - -func newExternalHandlerClient(rancherClient *RancherClient) *ExternalHandlerClient { - return &ExternalHandlerClient{ - rancherClient: rancherClient, - } -} - -func (c *ExternalHandlerClient) Create(container *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doCreate(EXTERNAL_HANDLER_TYPE, container, resp) - return resp, err -} - -func (c *ExternalHandlerClient) Update(existing *ExternalHandler, updates interface{}) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doUpdate(EXTERNAL_HANDLER_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ExternalHandlerClient) List(opts *ListOpts) (*ExternalHandlerCollection, error) { - resp := &ExternalHandlerCollection{} - err := c.rancherClient.doList(EXTERNAL_HANDLER_TYPE, opts, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ById(id string) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doById(EXTERNAL_HANDLER_TYPE, id, resp) - return resp, err -} - -func (c *ExternalHandlerClient) Delete(container *ExternalHandler) error { - return c.rancherClient.doResourceDelete(EXTERNAL_HANDLER_TYPE, &container.Resource) -} - -func (c *ExternalHandlerClient) ActionActivate(resource *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ActionCreate(resource *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ActionDeactivate(resource *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ActionPurge(resource *ExternalHandler) 
(*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ActionRemove(resource *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ActionRestore(resource *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *ExternalHandlerClient) ActionUpdate(resource *ExternalHandler) (*ExternalHandler, error) { - resp := &ExternalHandler{} - err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_external_handler_process_map.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_external_handler_process_map.go deleted file mode 100644 index 86278578..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_external_handler_process_map.go +++ /dev/null @@ -1,138 +0,0 @@ -package client - -const ( - EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE = "externalHandlerExternalHandlerProcessMap" -) - -type ExternalHandlerExternalHandlerProcessMap struct { - Resource - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - ExternalHandlerId string `json:"externalHandlerId,omitempty"` - - ExternalHandlerProcessId string `json:"externalHandlerProcessId,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type ExternalHandlerExternalHandlerProcessMapCollection struct { - Collection - Data []ExternalHandlerExternalHandlerProcessMap `json:"data,omitempty"` -} - -type ExternalHandlerExternalHandlerProcessMapClient struct { - rancherClient *RancherClient -} - -type ExternalHandlerExternalHandlerProcessMapOperations interface { - List(opts *ListOpts) (*ExternalHandlerExternalHandlerProcessMapCollection, error) - Create(opts *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) - Update(existing *ExternalHandlerExternalHandlerProcessMap, updates interface{}) (*ExternalHandlerExternalHandlerProcessMap, error) - ById(id string) (*ExternalHandlerExternalHandlerProcessMap, error) - Delete(container *ExternalHandlerExternalHandlerProcessMap) error - ActionActivate (*ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) - ActionCreate (*ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) - ActionDeactivate (*ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) - ActionPurge (*ExternalHandlerExternalHandlerProcessMap) 
-	ActionRemove (*ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error)
-	ActionRestore (*ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error)
-	ActionUpdate (*ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error)
-}
-
-func newExternalHandlerExternalHandlerProcessMapClient(rancherClient *RancherClient) *ExternalHandlerExternalHandlerProcessMapClient {
-	return &ExternalHandlerExternalHandlerProcessMapClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) Create(container *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doCreate(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) Update(existing *ExternalHandlerExternalHandlerProcessMap, updates interface{}) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doUpdate(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) List(opts *ListOpts) (*ExternalHandlerExternalHandlerProcessMapCollection, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMapCollection{}
-	err := c.rancherClient.doList(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ById(id string) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doById(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) Delete(container *ExternalHandlerExternalHandlerProcessMap) error {
-	return c.rancherClient.doResourceDelete(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, &container.Resource)
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionActivate(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionCreate(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionDeactivate(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionPurge(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionRemove(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionRestore(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerExternalHandlerProcessMapClient) ActionUpdate(resource *ExternalHandlerExternalHandlerProcessMap) (*ExternalHandlerExternalHandlerProcessMap, error) {
-	resp := &ExternalHandlerExternalHandlerProcessMap{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_process.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_process.go
deleted file mode 100644
index 19eb00ab..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_external_handler_process.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package client
-
-const (
-	EXTERNAL_HANDLER_PROCESS_TYPE = "externalHandlerProcess"
-)
-
-type ExternalHandlerProcess struct {
-	Resource
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type ExternalHandlerProcessCollection struct {
-	Collection
-	Data []ExternalHandlerProcess `json:"data,omitempty"`
-}
-
-type ExternalHandlerProcessClient struct {
-	rancherClient *RancherClient
-}
-
-type ExternalHandlerProcessOperations interface {
-	List(opts *ListOpts) (*ExternalHandlerProcessCollection, error)
-	Create(opts *ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	Update(existing *ExternalHandlerProcess, updates interface{}) (*ExternalHandlerProcess, error)
-	ById(id string) (*ExternalHandlerProcess, error)
-	Delete(container *ExternalHandlerProcess) error
-	ActionActivate (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	ActionCreate (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	ActionDeactivate (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	ActionPurge (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	ActionRemove (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	ActionRestore (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-	ActionUpdate (*ExternalHandlerProcess) (*ExternalHandlerProcess, error)
-}
-
-func newExternalHandlerProcessClient(rancherClient *RancherClient) *ExternalHandlerProcessClient {
-	return &ExternalHandlerProcessClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *ExternalHandlerProcessClient) Create(container *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doCreate(EXTERNAL_HANDLER_PROCESS_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) Update(existing *ExternalHandlerProcess, updates interface{}) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doUpdate(EXTERNAL_HANDLER_PROCESS_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) List(opts *ListOpts) (*ExternalHandlerProcessCollection, error) {
-	resp := &ExternalHandlerProcessCollection{}
-	err := c.rancherClient.doList(EXTERNAL_HANDLER_PROCESS_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ById(id string) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doById(EXTERNAL_HANDLER_PROCESS_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) Delete(container *ExternalHandlerProcess) error {
-	return c.rancherClient.doResourceDelete(EXTERNAL_HANDLER_PROCESS_TYPE, &container.Resource)
-}
-
-func (c *ExternalHandlerProcessClient) ActionActivate(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ActionCreate(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ActionDeactivate(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ActionPurge(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ActionRemove(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ActionRestore(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ExternalHandlerProcessClient) ActionUpdate(resource *ExternalHandlerProcess) (*ExternalHandlerProcess, error) {
-	resp := &ExternalHandlerProcess{}
-	err := c.rancherClient.doEmptyAction(EXTERNAL_HANDLER_PROCESS_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_githubconfig.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_githubconfig.go
deleted file mode 100644
index 342f4ce0..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_githubconfig.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package client
-
-const (
-	GITHUBCONFIG_TYPE = "githubconfig"
-)
-
-type Githubconfig struct {
-	Resource
-
-	AllowedOrganizations []string `json:"allowedOrganizations,omitempty"`
-
-	AllowedUsers []string `json:"allowedUsers,omitempty"`
-
-	ClientId string `json:"clientId,omitempty"`
-
-	ClientSecret string `json:"clientSecret,omitempty"`
-
-	Enabled bool `json:"enabled,omitempty"`
-
-}
-
-type GithubconfigCollection struct {
-	Collection
-	Data []Githubconfig `json:"data,omitempty"`
-}
-
-type GithubconfigClient struct {
-	rancherClient *RancherClient
-}
-
-type GithubconfigOperations interface {
-	List(opts *ListOpts) (*GithubconfigCollection, error)
-	Create(opts *Githubconfig) (*Githubconfig, error)
-	Update(existing *Githubconfig, updates interface{}) (*Githubconfig, error)
-	ById(id string) (*Githubconfig, error)
-	Delete(container *Githubconfig) error
-}
-
-func newGithubconfigClient(rancherClient *RancherClient) *GithubconfigClient {
-	return &GithubconfigClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *GithubconfigClient) Create(container *Githubconfig) (*Githubconfig, error) {
-	resp := &Githubconfig{}
-	err := c.rancherClient.doCreate(GITHUBCONFIG_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *GithubconfigClient) Update(existing *Githubconfig, updates interface{}) (*Githubconfig, error) {
-	resp := &Githubconfig{}
-	err := c.rancherClient.doUpdate(GITHUBCONFIG_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *GithubconfigClient) List(opts *ListOpts) (*GithubconfigCollection, error) {
-	resp := &GithubconfigCollection{}
-	err := c.rancherClient.doList(GITHUBCONFIG_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *GithubconfigClient) ById(id string) (*Githubconfig, error) {
-	resp := &Githubconfig{}
-	err := c.rancherClient.doById(GITHUBCONFIG_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *GithubconfigClient) Delete(container *Githubconfig) error {
-	return c.rancherClient.doResourceDelete(GITHUBCONFIG_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer.go
deleted file mode 100644
index 7dd04913..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package client
-
-const (
-	GLOBAL_LOAD_BALANCER_TYPE = "globalLoadBalancer"
-)
-
-type GlobalLoadBalancer struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	GlobalLoadBalancerHealthCheck []interface{} `json:"globalLoadBalancerHealthCheck,omitempty"`
-
-	GlobalLoadBalancerPolicy []interface{} `json:"globalLoadBalancerPolicy,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type GlobalLoadBalancerCollection struct {
-	Collection
-	Data []GlobalLoadBalancer `json:"data,omitempty"`
-}
-
-type GlobalLoadBalancerClient struct {
-	rancherClient *RancherClient
-}
-
-type GlobalLoadBalancerOperations interface {
-	List(opts *ListOpts) (*GlobalLoadBalancerCollection, error)
-	Create(opts *GlobalLoadBalancer) (*GlobalLoadBalancer, error)
-	Update(existing *GlobalLoadBalancer, updates interface{}) (*GlobalLoadBalancer, error)
-	ById(id string) (*GlobalLoadBalancer, error)
-	Delete(container *GlobalLoadBalancer) error
-	ActionCreate (*GlobalLoadBalancer) (*GlobalLoadBalancer, error)
-	ActionRemove (*GlobalLoadBalancer) (*GlobalLoadBalancer, error)
-}
-
-func newGlobalLoadBalancerClient(rancherClient *RancherClient) *GlobalLoadBalancerClient {
-	return &GlobalLoadBalancerClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *GlobalLoadBalancerClient) Create(container *GlobalLoadBalancer) (*GlobalLoadBalancer, error) {
-	resp := &GlobalLoadBalancer{}
-	err := c.rancherClient.doCreate(GLOBAL_LOAD_BALANCER_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerClient) Update(existing *GlobalLoadBalancer, updates interface{}) (*GlobalLoadBalancer, error) {
-	resp := &GlobalLoadBalancer{}
-	err := c.rancherClient.doUpdate(GLOBAL_LOAD_BALANCER_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerClient) List(opts *ListOpts) (*GlobalLoadBalancerCollection, error) {
-	resp := &GlobalLoadBalancerCollection{}
-	err := c.rancherClient.doList(GLOBAL_LOAD_BALANCER_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerClient) ById(id string) (*GlobalLoadBalancer, error) {
-	resp := &GlobalLoadBalancer{}
-	err := c.rancherClient.doById(GLOBAL_LOAD_BALANCER_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerClient) Delete(container *GlobalLoadBalancer) error {
-	return c.rancherClient.doResourceDelete(GLOBAL_LOAD_BALANCER_TYPE, &container.Resource)
-}
-
-func (c *GlobalLoadBalancerClient) ActionCreate(resource *GlobalLoadBalancer) (*GlobalLoadBalancer, error) {
-	resp := &GlobalLoadBalancer{}
-	err := c.rancherClient.doEmptyAction(GLOBAL_LOAD_BALANCER_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerClient) ActionRemove(resource *GlobalLoadBalancer) (*GlobalLoadBalancer, error) {
-	resp := &GlobalLoadBalancer{}
-	err := c.rancherClient.doEmptyAction(GLOBAL_LOAD_BALANCER_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_health_check.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_health_check.go
deleted file mode 100644
index a9a7b6ca..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_health_check.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package client
-
-const (
-	GLOBAL_LOAD_BALANCER_HEALTH_CHECK_TYPE = "globalLoadBalancerHealthCheck"
-)
-
-type GlobalLoadBalancerHealthCheck struct {
-	Resource
-
-	Name string `json:"name,omitempty"`
-
-}
-
-type GlobalLoadBalancerHealthCheckCollection struct {
-	Collection
-	Data []GlobalLoadBalancerHealthCheck `json:"data,omitempty"`
-}
-
-type GlobalLoadBalancerHealthCheckClient struct {
-	rancherClient *RancherClient
-}
-
-type GlobalLoadBalancerHealthCheckOperations interface {
-	List(opts *ListOpts) (*GlobalLoadBalancerHealthCheckCollection, error)
-	Create(opts *GlobalLoadBalancerHealthCheck) (*GlobalLoadBalancerHealthCheck, error)
-	Update(existing *GlobalLoadBalancerHealthCheck, updates interface{}) (*GlobalLoadBalancerHealthCheck, error)
-	ById(id string) (*GlobalLoadBalancerHealthCheck, error)
-	Delete(container *GlobalLoadBalancerHealthCheck) error
-}
-
-func newGlobalLoadBalancerHealthCheckClient(rancherClient *RancherClient) *GlobalLoadBalancerHealthCheckClient {
-	return &GlobalLoadBalancerHealthCheckClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *GlobalLoadBalancerHealthCheckClient) Create(container *GlobalLoadBalancerHealthCheck) (*GlobalLoadBalancerHealthCheck, error) {
-	resp := &GlobalLoadBalancerHealthCheck{}
-	err := c.rancherClient.doCreate(GLOBAL_LOAD_BALANCER_HEALTH_CHECK_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerHealthCheckClient) Update(existing *GlobalLoadBalancerHealthCheck, updates interface{}) (*GlobalLoadBalancerHealthCheck, error) {
-	resp := &GlobalLoadBalancerHealthCheck{}
-	err := c.rancherClient.doUpdate(GLOBAL_LOAD_BALANCER_HEALTH_CHECK_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerHealthCheckClient) List(opts *ListOpts) (*GlobalLoadBalancerHealthCheckCollection, error) {
-	resp := &GlobalLoadBalancerHealthCheckCollection{}
-	err := c.rancherClient.doList(GLOBAL_LOAD_BALANCER_HEALTH_CHECK_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerHealthCheckClient) ById(id string) (*GlobalLoadBalancerHealthCheck, error) {
-	resp := &GlobalLoadBalancerHealthCheck{}
-	err := c.rancherClient.doById(GLOBAL_LOAD_BALANCER_HEALTH_CHECK_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerHealthCheckClient) Delete(container *GlobalLoadBalancerHealthCheck) error {
-	return c.rancherClient.doResourceDelete(GLOBAL_LOAD_BALANCER_HEALTH_CHECK_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_policy.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_policy.go
deleted file mode 100644
index 5706064a..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_global_load_balancer_policy.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package client
-
-const (
-	GLOBAL_LOAD_BALANCER_POLICY_TYPE = "globalLoadBalancerPolicy"
-)
-
-type GlobalLoadBalancerPolicy struct {
-	Resource
-
-	Name string `json:"name,omitempty"`
-
-}
-
-type GlobalLoadBalancerPolicyCollection struct {
-	Collection
-	Data []GlobalLoadBalancerPolicy `json:"data,omitempty"`
-}
-
-type GlobalLoadBalancerPolicyClient struct {
-	rancherClient *RancherClient
-}
-
-type GlobalLoadBalancerPolicyOperations interface {
-	List(opts *ListOpts) (*GlobalLoadBalancerPolicyCollection, error)
-	Create(opts *GlobalLoadBalancerPolicy) (*GlobalLoadBalancerPolicy, error)
-	Update(existing *GlobalLoadBalancerPolicy, updates interface{}) (*GlobalLoadBalancerPolicy, error)
-	ById(id string) (*GlobalLoadBalancerPolicy, error)
-	Delete(container *GlobalLoadBalancerPolicy) error
-}
-
-func newGlobalLoadBalancerPolicyClient(rancherClient *RancherClient) *GlobalLoadBalancerPolicyClient {
-	return &GlobalLoadBalancerPolicyClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *GlobalLoadBalancerPolicyClient) Create(container *GlobalLoadBalancerPolicy) (*GlobalLoadBalancerPolicy, error) {
-	resp := &GlobalLoadBalancerPolicy{}
-	err := c.rancherClient.doCreate(GLOBAL_LOAD_BALANCER_POLICY_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerPolicyClient) Update(existing *GlobalLoadBalancerPolicy, updates interface{}) (*GlobalLoadBalancerPolicy, error) {
-	resp := &GlobalLoadBalancerPolicy{}
-	err := c.rancherClient.doUpdate(GLOBAL_LOAD_BALANCER_POLICY_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerPolicyClient) List(opts *ListOpts) (*GlobalLoadBalancerPolicyCollection, error) {
-	resp := &GlobalLoadBalancerPolicyCollection{}
-	err := c.rancherClient.doList(GLOBAL_LOAD_BALANCER_POLICY_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerPolicyClient) ById(id string) (*GlobalLoadBalancerPolicy, error) {
-	resp := &GlobalLoadBalancerPolicy{}
-	err := c.rancherClient.doById(GLOBAL_LOAD_BALANCER_POLICY_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *GlobalLoadBalancerPolicyClient) Delete(container *GlobalLoadBalancerPolicy) error {
-	return c.rancherClient.doResourceDelete(GLOBAL_LOAD_BALANCER_POLICY_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host.go
deleted file mode 100644
index 5bdcb914..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package client
-
-const (
-	HOST_TYPE = "host"
-)
-
-type Host struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	AgentId string `json:"agentId,omitempty"`
-
-	ApiProxy string `json:"apiProxy,omitempty"`
-
-	ComputeTotal int `json:"computeTotal,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	Info interface{} `json:"info,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	PhysicalHostId string `json:"physicalHostId,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type HostCollection struct {
-	Collection
-	Data []Host `json:"data,omitempty"`
-}
-
-type HostClient struct {
-	rancherClient *RancherClient
-}
-
-type HostOperations interface {
-	List(opts *ListOpts) (*HostCollection, error)
-	Create(opts *Host) (*Host, error)
-	Update(existing *Host, updates interface{}) (*Host, error)
-	ById(id string) (*Host, error)
-	Delete(container *Host) error
-	ActionActivate (*Host) (*Host, error)
-	ActionCreate (*Host) (*Host, error)
-	ActionDeactivate (*Host) (*Host, error)
-	ActionPurge (*Host) (*Host, error)
-	ActionRemove (*Host) (*Host, error)
-	ActionRestore (*Host) (*Host, error)
-	ActionUpdate (*Host) (*Host, error)
-}
-
-func newHostClient(rancherClient *RancherClient) *HostClient {
-	return &HostClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *HostClient) Create(container *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doCreate(HOST_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *HostClient) Update(existing *Host, updates interface{}) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doUpdate(HOST_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *HostClient) List(opts *ListOpts) (*HostCollection, error) {
-	resp := &HostCollection{}
-	err := c.rancherClient.doList(HOST_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *HostClient) ById(id string) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doById(HOST_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *HostClient) Delete(container *Host) error {
-	return c.rancherClient.doResourceDelete(HOST_TYPE, &container.Resource)
-}
-
-func (c *HostClient) ActionActivate(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *HostClient) ActionCreate(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *HostClient) ActionDeactivate(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *HostClient) ActionPurge(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *HostClient) ActionRemove(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *HostClient) ActionRestore(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *HostClient) ActionUpdate(resource *Host) (*Host, error) {
-	resp := &Host{}
-	err := c.rancherClient.doEmptyAction(HOST_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host_access.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host_access.go
deleted file mode 100644
index 6dc2946d..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_host_access.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package client
-
-const (
-	HOST_ACCESS_TYPE = "hostAccess"
-)
-
-type HostAccess struct {
-	Resource
-
-	Token string `json:"token,omitempty"`
-
-	Url string `json:"url,omitempty"`
-
-}
-
-type HostAccessCollection struct {
-	Collection
-	Data []HostAccess `json:"data,omitempty"`
-}
-
-type HostAccessClient struct {
-	rancherClient *RancherClient
-}
-
-type HostAccessOperations interface {
-	List(opts *ListOpts) (*HostAccessCollection, error)
-	Create(opts *HostAccess) (*HostAccess, error)
-	Update(existing *HostAccess, updates interface{}) (*HostAccess, error)
-	ById(id string) (*HostAccess, error)
-	Delete(container *HostAccess) error
-}
-
-func newHostAccessClient(rancherClient *RancherClient) *HostAccessClient {
-	return &HostAccessClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *HostAccessClient) Create(container *HostAccess) (*HostAccess, error) {
-	resp := &HostAccess{}
-	err := c.rancherClient.doCreate(HOST_ACCESS_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *HostAccessClient) Update(existing *HostAccess, updates interface{}) (*HostAccess, error) {
-	resp := &HostAccess{}
-	err := c.rancherClient.doUpdate(HOST_ACCESS_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *HostAccessClient) List(opts *ListOpts) (*HostAccessCollection, error) {
-	resp := &HostAccessCollection{}
-	err := c.rancherClient.doList(HOST_ACCESS_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *HostAccessClient) ById(id string) (*HostAccess, error) {
-	resp := &HostAccess{}
-	err := c.rancherClient.doById(HOST_ACCESS_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *HostAccessClient) Delete(container *HostAccess) error {
-	return c.rancherClient.doResourceDelete(HOST_ACCESS_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_image.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_image.go
deleted file mode 100644
index 334ff1ab..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_image.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package client
-
-const (
-	IMAGE_TYPE = "image"
-)
-
-type Image struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type ImageCollection struct {
-	Collection
-	Data []Image `json:"data,omitempty"`
-}
-
-type ImageClient struct {
-	rancherClient *RancherClient
-}
-
-type ImageOperations interface {
-	List(opts *ListOpts) (*ImageCollection, error)
-	Create(opts *Image) (*Image, error)
-	Update(existing *Image, updates interface{}) (*Image, error)
-	ById(id string) (*Image, error)
-	Delete(container *Image) error
-	ActionActivate (*Image) (*Image, error)
-	ActionCreate (*Image) (*Image, error)
-	ActionDeactivate (*Image) (*Image, error)
-	ActionPurge (*Image) (*Image, error)
-	ActionRemove (*Image) (*Image, error)
-	ActionRestore (*Image) (*Image, error)
-	ActionUpdate (*Image) (*Image, error)
-}
-
-func newImageClient(rancherClient *RancherClient) *ImageClient {
-	return &ImageClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *ImageClient) Create(container *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doCreate(IMAGE_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *ImageClient) Update(existing *Image, updates interface{}) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doUpdate(IMAGE_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *ImageClient) List(opts *ListOpts) (*ImageCollection, error) {
-	resp := &ImageCollection{}
-	err := c.rancherClient.doList(IMAGE_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ById(id string) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doById(IMAGE_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *ImageClient) Delete(container *Image) error {
-	return c.rancherClient.doResourceDelete(IMAGE_TYPE, &container.Resource)
-}
-
-func (c *ImageClient) ActionActivate(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ActionCreate(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ActionDeactivate(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ActionPurge(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ActionRemove(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ActionRestore(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *ImageClient) ActionUpdate(resource *Image) (*Image, error) {
-	resp := &Image{}
-	err := c.rancherClient.doEmptyAction(IMAGE_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance.go
deleted file mode 100644
index 72475dd6..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package client
-
-const (
-	INSTANCE_TYPE = "instance"
-)
-
-type Instance struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type InstanceCollection struct {
-	Collection
-	Data []Instance `json:"data,omitempty"`
-}
-
-type InstanceClient struct {
-	rancherClient *RancherClient
-}
-
-type InstanceOperations interface {
-	List(opts *ListOpts) (*InstanceCollection, error)
-	Create(opts *Instance) (*Instance, error)
-	Update(existing *Instance, updates interface{}) (*Instance, error)
-	ById(id string) (*Instance, error)
-	Delete(container *Instance) error
-	ActionAllocate (*Instance) (*Instance, error)
-	ActionCreate (*Instance) (*Instance, error)
-	ActionDeallocate (*Instance) (*Instance, error)
-	ActionMigrate (*Instance) (*Instance, error)
-	ActionPurge (*Instance) (*Instance, error)
-	ActionRemove (*Instance) (*Instance, error)
-	ActionRestart (*Instance) (*Instance, error)
-	ActionRestore (*Instance) (*Instance, error)
-	ActionStart (*Instance) (*Instance, error)
-	ActionUpdate (*Instance) (*Instance, error)
-}
-
-func newInstanceClient(rancherClient *RancherClient) *InstanceClient {
-	return &InstanceClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *InstanceClient) Create(container *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doCreate(INSTANCE_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) Update(existing *Instance, updates interface{}) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doUpdate(INSTANCE_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) List(opts *ListOpts) (*InstanceCollection, error) {
-	resp := &InstanceCollection{}
-	err := c.rancherClient.doList(INSTANCE_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ById(id string) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doById(INSTANCE_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) Delete(container *Instance) error {
-	return c.rancherClient.doResourceDelete(INSTANCE_TYPE, &container.Resource)
-}
-
-func (c *InstanceClient) ActionAllocate(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "allocate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionCreate(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionDeallocate(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "deallocate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionMigrate(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "migrate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionPurge(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionRemove(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionRestart(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "restart", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionRestore(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionStart(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "start", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceClient) ActionUpdate(resource *Instance) (*Instance, error) {
-	resp := &Instance{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console.go
deleted file mode 100644
index 0a594397..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package client
-
-const (
-	INSTANCE_CONSOLE_TYPE = "instanceConsole"
-)
-
-type InstanceConsole struct {
-	Resource
-
-	Kind string `json:"kind,omitempty"`
-
-	Password string `json:"password,omitempty"`
-
-	Url string `json:"url,omitempty"`
-
-}
-
-type InstanceConsoleCollection struct {
-	Collection
-	Data []InstanceConsole `json:"data,omitempty"`
-}
-
-type InstanceConsoleClient struct {
-	rancherClient *RancherClient
-}
-
-type InstanceConsoleOperations interface {
-	List(opts *ListOpts) (*InstanceConsoleCollection, error)
-	Create(opts *InstanceConsole) (*InstanceConsole, error)
-	Update(existing *InstanceConsole, updates interface{}) (*InstanceConsole, error)
-	ById(id string) (*InstanceConsole, error)
-	Delete(container *InstanceConsole) error
-}
-
-func newInstanceConsoleClient(rancherClient *RancherClient) *InstanceConsoleClient {
-	return &InstanceConsoleClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *InstanceConsoleClient) Create(container *InstanceConsole) (*InstanceConsole, error) {
-	resp := &InstanceConsole{}
-	err := c.rancherClient.doCreate(INSTANCE_CONSOLE_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleClient) Update(existing *InstanceConsole, updates interface{}) (*InstanceConsole, error) {
-	resp := &InstanceConsole{}
-	err := c.rancherClient.doUpdate(INSTANCE_CONSOLE_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleClient) List(opts *ListOpts) (*InstanceConsoleCollection, error) {
-	resp := &InstanceConsoleCollection{}
-	err := c.rancherClient.doList(INSTANCE_CONSOLE_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleClient) ById(id string) (*InstanceConsole, error) {
-	resp := &InstanceConsole{}
-	err := c.rancherClient.doById(INSTANCE_CONSOLE_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleClient) Delete(container *InstanceConsole) error {
-	return c.rancherClient.doResourceDelete(INSTANCE_CONSOLE_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console_input.go
deleted file mode 100644
index 02930037..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_console_input.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package client
-
-const (
-	INSTANCE_CONSOLE_INPUT_TYPE = "instanceConsoleInput"
-)
-
-type InstanceConsoleInput struct {
-	Resource
-
-}
-
-type InstanceConsoleInputCollection struct {
-	Collection
-	Data []InstanceConsoleInput `json:"data,omitempty"`
-}
-
-type InstanceConsoleInputClient struct {
-	rancherClient *RancherClient
-}
-
-type InstanceConsoleInputOperations interface {
-	List(opts *ListOpts) (*InstanceConsoleInputCollection, error)
-	Create(opts *InstanceConsoleInput) (*InstanceConsoleInput, error)
-	Update(existing *InstanceConsoleInput, updates interface{}) (*InstanceConsoleInput, error)
-	ById(id string) (*InstanceConsoleInput, error)
-	Delete(container *InstanceConsoleInput) error
-}
-
-func newInstanceConsoleInputClient(rancherClient *RancherClient) *InstanceConsoleInputClient {
-	return &InstanceConsoleInputClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *InstanceConsoleInputClient) Create(container *InstanceConsoleInput) (*InstanceConsoleInput, error) {
-	resp := &InstanceConsoleInput{}
-	err := c.rancherClient.doCreate(INSTANCE_CONSOLE_INPUT_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleInputClient) Update(existing *InstanceConsoleInput, updates interface{}) (*InstanceConsoleInput, error) {
-	resp := &InstanceConsoleInput{}
-	err := c.rancherClient.doUpdate(INSTANCE_CONSOLE_INPUT_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleInputClient) List(opts *ListOpts) (*InstanceConsoleInputCollection, error) {
-	resp := &InstanceConsoleInputCollection{}
-	err := c.rancherClient.doList(INSTANCE_CONSOLE_INPUT_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleInputClient) ById(id string) (*InstanceConsoleInput, error) {
-	resp := &InstanceConsoleInput{}
-	err := c.rancherClient.doById(INSTANCE_CONSOLE_INPUT_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *InstanceConsoleInputClient) Delete(container *InstanceConsoleInput) error {
-	return c.rancherClient.doResourceDelete(INSTANCE_CONSOLE_INPUT_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_link.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_link.go
deleted file mode 100644
index 8ef83b35..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_link.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package client
-
-const (
-	INSTANCE_LINK_TYPE = "instanceLink"
-)
-
-type InstanceLink struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	InstanceId string `json:"instanceId,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	LinkName string `json:"linkName,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	Ports []interface{} `json:"ports,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	TargetInstanceId string `json:"targetInstanceId,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type InstanceLinkCollection struct {
-	Collection
-	Data []InstanceLink `json:"data,omitempty"`
-}
-
-type InstanceLinkClient struct {
-	rancherClient *RancherClient
-}
-
-type InstanceLinkOperations interface {
-	List(opts *ListOpts) (*InstanceLinkCollection, error)
-	Create(opts *InstanceLink) (*InstanceLink, error)
-	Update(existing *InstanceLink, updates interface{}) (*InstanceLink, error)
-	ById(id string) (*InstanceLink, error)
-	Delete(container *InstanceLink) error
-	ActionActivate (*InstanceLink) (*InstanceLink, error)
-	ActionCreate (*InstanceLink) (*InstanceLink, error)
-	ActionDeactivate (*InstanceLink) (*InstanceLink, error)
-	ActionPurge (*InstanceLink) (*InstanceLink, error)
-	ActionRemove (*InstanceLink) (*InstanceLink, error)
-	ActionRestore (*InstanceLink) (*InstanceLink, error)
-	ActionUpdate (*InstanceLink) (*InstanceLink, error)
-}
-
-func newInstanceLinkClient(rancherClient *RancherClient) *InstanceLinkClient {
-	return &InstanceLinkClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *InstanceLinkClient) Create(container *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doCreate(INSTANCE_LINK_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) Update(existing *InstanceLink, updates interface{}) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doUpdate(INSTANCE_LINK_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) List(opts *ListOpts) (*InstanceLinkCollection, error) {
-	resp := &InstanceLinkCollection{}
-	err := c.rancherClient.doList(INSTANCE_LINK_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ById(id string) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doById(INSTANCE_LINK_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) Delete(container *InstanceLink) error {
-	return c.rancherClient.doResourceDelete(INSTANCE_LINK_TYPE, &container.Resource)
-}
-
-func (c *InstanceLinkClient) ActionActivate(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ActionCreate(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ActionDeactivate(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ActionPurge(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ActionRemove(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ActionRestore(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *InstanceLinkClient) ActionUpdate(resource *InstanceLink) (*InstanceLink, error) {
-	resp := &InstanceLink{}
-	err := c.rancherClient.doEmptyAction(INSTANCE_LINK_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_stop.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_stop.go
deleted file mode 100644
index e3dfc37c..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_instance_stop.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package client
-
-const (
-	INSTANCE_STOP_TYPE = "instanceStop"
-)
-
-type InstanceStop struct {
-	Resource
-
-	DeallocateFromHost bool `json:"deallocateFromHost,omitempty"`
-
-	Remove bool `json:"remove,omitempty"`
-
-	Timeout int `json:"timeout,omitempty"`
-
-}
-
-type InstanceStopCollection struct {
-	Collection
-	Data []InstanceStop `json:"data,omitempty"`
-}
-
-type InstanceStopClient struct {
-	rancherClient *RancherClient
-}
-
-type InstanceStopOperations interface {
-	List(opts *ListOpts) (*InstanceStopCollection, error)
-	Create(opts *InstanceStop) (*InstanceStop, error)
-	Update(existing *InstanceStop, updates interface{}) (*InstanceStop, error)
-	ById(id string) (*InstanceStop, error)
-	Delete(container *InstanceStop) error
-}
-
-func newInstanceStopClient(rancherClient *RancherClient) *InstanceStopClient {
-	return &InstanceStopClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *InstanceStopClient) Create(container *InstanceStop) (*InstanceStop, error) {
-	resp := &InstanceStop{}
-	err := c.rancherClient.doCreate(INSTANCE_STOP_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *InstanceStopClient) Update(existing *InstanceStop, updates interface{}) (*InstanceStop, error) {
-	resp := &InstanceStop{}
-	err := c.rancherClient.doUpdate(INSTANCE_STOP_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *InstanceStopClient) List(opts *ListOpts) (*InstanceStopCollection, error) {
-	resp := &InstanceStopCollection{}
-	err := c.rancherClient.doList(INSTANCE_STOP_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *InstanceStopClient) ById(id string) (*InstanceStop, error) {
-	resp := &InstanceStop{}
-	err := c.rancherClient.doById(INSTANCE_STOP_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *InstanceStopClient) Delete(container *InstanceStop) error {
-	return c.rancherClient.doResourceDelete(INSTANCE_STOP_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address.go
deleted file mode 100644
index 8896497b..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package client
-
-const (
-	IP_ADDRESS_TYPE = "ipAddress"
-)
-
-type IpAddress struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Address string `json:"address,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	NetworkId string `json:"networkId,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type IpAddressCollection struct {
-	Collection
-	Data []IpAddress `json:"data,omitempty"`
-}
-
-type IpAddressClient struct {
-	rancherClient *RancherClient
-}
-
-type IpAddressOperations interface {
-	List(opts *ListOpts) (*IpAddressCollection, error)
-	Create(opts *IpAddress) (*IpAddress, error)
-	Update(existing *IpAddress, updates interface{}) (*IpAddress, error)
-	ById(id string) (*IpAddress, error)
-	Delete(container *IpAddress) error
-	ActionActivate (*IpAddress) (*IpAddress, error)
-	ActionCreate (*IpAddress) (*IpAddress, error)
-	ActionDeactivate (*IpAddress) (*IpAddress, error)
-	ActionDisassociate (*IpAddress) (*IpAddress, error)
-	ActionPurge (*IpAddress) (*IpAddress, error)
-	ActionRemove (*IpAddress) (*IpAddress, error)
-	ActionRestore (*IpAddress) (*IpAddress, error)
-	ActionUpdate (*IpAddress) (*IpAddress, error)
-}
-
-func newIpAddressClient(rancherClient *RancherClient) *IpAddressClient {
-	return &IpAddressClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *IpAddressClient) Create(container *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doCreate(IP_ADDRESS_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) Update(existing *IpAddress, updates interface{}) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doUpdate(IP_ADDRESS_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) List(opts *ListOpts) (*IpAddressCollection, error) {
-	resp := &IpAddressCollection{}
-	err := c.rancherClient.doList(IP_ADDRESS_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ById(id string) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doById(IP_ADDRESS_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) Delete(container *IpAddress) error {
-	return c.rancherClient.doResourceDelete(IP_ADDRESS_TYPE, &container.Resource)
-}
-
-func (c *IpAddressClient) ActionActivate(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionCreate(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionDeactivate(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionDisassociate(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "disassociate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionPurge(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionRemove(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionRestore(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *IpAddressClient) ActionUpdate(resource *IpAddress) (*IpAddress, error) {
-	resp := &IpAddress{}
-	err := c.rancherClient.doEmptyAction(IP_ADDRESS_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address_associate_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address_associate_input.go
deleted file mode 100644
index ead60a7d..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_ip_address_associate_input.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package client
-
-const (
-	IP_ADDRESS_ASSOCIATE_INPUT_TYPE = "ipAddressAssociateInput"
-)
-
-type IpAddressAssociateInput struct {
-	Resource
-
-	IpAddressId string `json:"ipAddressId,omitempty"`
-
-}
-
-type IpAddressAssociateInputCollection struct {
-	Collection
-	Data []IpAddressAssociateInput `json:"data,omitempty"`
-}
-
-type IpAddressAssociateInputClient struct {
-	rancherClient *RancherClient
-}
-
-type IpAddressAssociateInputOperations interface {
-	List(opts *ListOpts) (*IpAddressAssociateInputCollection, error)
-	Create(opts *IpAddressAssociateInput) (*IpAddressAssociateInput, error)
-	Update(existing *IpAddressAssociateInput, updates interface{}) (*IpAddressAssociateInput, error)
-	ById(id string) (*IpAddressAssociateInput, error)
-	Delete(container *IpAddressAssociateInput) error
-}
-
-func newIpAddressAssociateInputClient(rancherClient *RancherClient) *IpAddressAssociateInputClient {
-	return &IpAddressAssociateInputClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *IpAddressAssociateInputClient) Create(container *IpAddressAssociateInput) (*IpAddressAssociateInput, error) {
-	resp := &IpAddressAssociateInput{}
-	err := c.rancherClient.doCreate(IP_ADDRESS_ASSOCIATE_INPUT_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *IpAddressAssociateInputClient) Update(existing *IpAddressAssociateInput, updates interface{}) (*IpAddressAssociateInput, error) {
-	resp := &IpAddressAssociateInput{}
-	err := c.rancherClient.doUpdate(IP_ADDRESS_ASSOCIATE_INPUT_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *IpAddressAssociateInputClient) List(opts *ListOpts) (*IpAddressAssociateInputCollection, error) {
-	resp := &IpAddressAssociateInputCollection{}
-	err := c.rancherClient.doList(IP_ADDRESS_ASSOCIATE_INPUT_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *IpAddressAssociateInputClient) ById(id string) (*IpAddressAssociateInput, error) {
-	resp := &IpAddressAssociateInput{}
-	err := c.rancherClient.doById(IP_ADDRESS_ASSOCIATE_INPUT_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *IpAddressAssociateInputClient) Delete(container *IpAddressAssociateInput) error {
-	return c.rancherClient.doResourceDelete(IP_ADDRESS_ASSOCIATE_INPUT_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer.go
deleted file mode 100644
index a668b8d0..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package client
-
-const (
-	LOAD_BALANCER_TYPE = "loadBalancer"
-)
-
-type LoadBalancer struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	GlobalLoadBalancerId string `json:"globalLoadBalancerId,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	LoadBalancerConfigId string `json:"loadBalancerConfigId,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-	Weight int `json:"weight,omitempty"`
-
-}
-
-type LoadBalancerCollection struct {
-	Collection
-	Data []LoadBalancer `json:"data,omitempty"`
-}
-
-type LoadBalancerClient struct {
-	rancherClient *RancherClient
-}
-
-type LoadBalancerOperations interface {
-	List(opts *ListOpts) (*LoadBalancerCollection, error)
-	Create(opts *LoadBalancer) (*LoadBalancer, error)
-	Update(existing *LoadBalancer, updates interface{}) (*LoadBalancer, error)
-	ById(id string) (*LoadBalancer, error)
-	Delete(container *LoadBalancer) error
-	ActionCreate (*LoadBalancer) (*LoadBalancer, error)
-	ActionRemove (*LoadBalancer) (*LoadBalancer, error)
-}
-
-func newLoadBalancerClient(rancherClient *RancherClient) *LoadBalancerClient {
-	return &LoadBalancerClient{
- rancherClient: rancherClient, - } -} - -func (c *LoadBalancerClient) Create(container *LoadBalancer) (*LoadBalancer, error) { - resp := &LoadBalancer{} - err := c.rancherClient.doCreate(LOAD_BALANCER_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerClient) Update(existing *LoadBalancer, updates interface{}) (*LoadBalancer, error) { - resp := &LoadBalancer{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerClient) List(opts *ListOpts) (*LoadBalancerCollection, error) { - resp := &LoadBalancerCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerClient) ById(id string) (*LoadBalancer, error) { - resp := &LoadBalancer{} - err := c.rancherClient.doById(LOAD_BALANCER_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerClient) Delete(container *LoadBalancer) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_TYPE, &container.Resource) -} - -func (c *LoadBalancerClient) ActionCreate(resource *LoadBalancer) (*LoadBalancer, error) { - resp := &LoadBalancer{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *LoadBalancerClient) ActionRemove(resource *LoadBalancer) (*LoadBalancer, error) { - resp := &LoadBalancer{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_TYPE, "remove", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_app_cookie_stickiness_policy.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_app_cookie_stickiness_policy.go deleted file mode 100644 index 330cfd99..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_app_cookie_stickiness_policy.go +++ /dev/null @@ -1,75 +0,0 @@ -package client - -const ( - LOAD_BALANCER_APP_COOKIE_STICKINESS_POLICY_TYPE = "loadBalancerAppCookieStickinessPolicy" -) - -type LoadBalancerAppCookieStickinessPolicy struct { - Resource - - Cookie string `json:"cookie,omitempty"` - - Length int `json:"length,omitempty"` - - Mode string `json:"mode,omitempty"` - - Name string `json:"name,omitempty"` - - Prefix bool `json:"prefix,omitempty"` - - RequestLearn bool `json:"requestLearn,omitempty"` - - Timeout int `json:"timeout,omitempty"` - -} - -type LoadBalancerAppCookieStickinessPolicyCollection struct { - Collection - Data []LoadBalancerAppCookieStickinessPolicy `json:"data,omitempty"` -} - -type LoadBalancerAppCookieStickinessPolicyClient struct { - rancherClient *RancherClient -} - -type LoadBalancerAppCookieStickinessPolicyOperations interface { - List(opts *ListOpts) (*LoadBalancerAppCookieStickinessPolicyCollection, error) - Create(opts *LoadBalancerAppCookieStickinessPolicy) (*LoadBalancerAppCookieStickinessPolicy, error) - Update(existing *LoadBalancerAppCookieStickinessPolicy, updates interface{}) (*LoadBalancerAppCookieStickinessPolicy, error) - ById(id string) (*LoadBalancerAppCookieStickinessPolicy, error) - Delete(container *LoadBalancerAppCookieStickinessPolicy) error -} - -func newLoadBalancerAppCookieStickinessPolicyClient(rancherClient *RancherClient) *LoadBalancerAppCookieStickinessPolicyClient { - return &LoadBalancerAppCookieStickinessPolicyClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerAppCookieStickinessPolicyClient) Create(container 
*LoadBalancerAppCookieStickinessPolicy) (*LoadBalancerAppCookieStickinessPolicy, error) { - resp := &LoadBalancerAppCookieStickinessPolicy{} - err := c.rancherClient.doCreate(LOAD_BALANCER_APP_COOKIE_STICKINESS_POLICY_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerAppCookieStickinessPolicyClient) Update(existing *LoadBalancerAppCookieStickinessPolicy, updates interface{}) (*LoadBalancerAppCookieStickinessPolicy, error) { - resp := &LoadBalancerAppCookieStickinessPolicy{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_APP_COOKIE_STICKINESS_POLICY_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerAppCookieStickinessPolicyClient) List(opts *ListOpts) (*LoadBalancerAppCookieStickinessPolicyCollection, error) { - resp := &LoadBalancerAppCookieStickinessPolicyCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_APP_COOKIE_STICKINESS_POLICY_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerAppCookieStickinessPolicyClient) ById(id string) (*LoadBalancerAppCookieStickinessPolicy, error) { - resp := &LoadBalancerAppCookieStickinessPolicy{} - err := c.rancherClient.doById(LOAD_BALANCER_APP_COOKIE_STICKINESS_POLICY_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerAppCookieStickinessPolicyClient) Delete(container *LoadBalancerAppCookieStickinessPolicy) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_APP_COOKIE_STICKINESS_POLICY_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_config.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_config.go deleted file mode 100644 index 9c1edc59..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_config.go +++ /dev/null @@ -1,114 +0,0 @@ -package client - -const ( - LOAD_BALANCER_CONFIG_TYPE = "loadBalancerConfig" -) - -type LoadBalancerConfig struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - AppCookieStickinessPolicy LoadBalancerAppCookieStickinessPolicy `json:"appCookieStickinessPolicy,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - HealthCheck LoadBalancerHealthCheck `json:"healthCheck,omitempty"` - - Kind string `json:"kind,omitempty"` - - LbCookieStickinessPolicy LoadBalancerCookieStickinessPolicy `json:"lbCookieStickinessPolicy,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type LoadBalancerConfigCollection struct { - Collection - Data []LoadBalancerConfig `json:"data,omitempty"` -} - -type LoadBalancerConfigClient struct { - rancherClient *RancherClient -} - -type LoadBalancerConfigOperations interface { - List(opts *ListOpts) (*LoadBalancerConfigCollection, error) - Create(opts *LoadBalancerConfig) (*LoadBalancerConfig, error) - Update(existing *LoadBalancerConfig, updates interface{}) (*LoadBalancerConfig, error) - ById(id string) (*LoadBalancerConfig, error) - Delete(container *LoadBalancerConfig) error - ActionCreate (*LoadBalancerConfig) 
(*LoadBalancerConfig, error) - ActionRemove (*LoadBalancerConfig) (*LoadBalancerConfig, error) - ActionUpdate (*LoadBalancerConfig) (*LoadBalancerConfig, error) -} - -func newLoadBalancerConfigClient(rancherClient *RancherClient) *LoadBalancerConfigClient { - return &LoadBalancerConfigClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerConfigClient) Create(container *LoadBalancerConfig) (*LoadBalancerConfig, error) { - resp := &LoadBalancerConfig{} - err := c.rancherClient.doCreate(LOAD_BALANCER_CONFIG_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerConfigClient) Update(existing *LoadBalancerConfig, updates interface{}) (*LoadBalancerConfig, error) { - resp := &LoadBalancerConfig{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_CONFIG_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerConfigClient) List(opts *ListOpts) (*LoadBalancerConfigCollection, error) { - resp := &LoadBalancerConfigCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_CONFIG_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerConfigClient) ById(id string) (*LoadBalancerConfig, error) { - resp := &LoadBalancerConfig{} - err := c.rancherClient.doById(LOAD_BALANCER_CONFIG_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerConfigClient) Delete(container *LoadBalancerConfig) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_CONFIG_TYPE, &container.Resource) -} - -func (c *LoadBalancerConfigClient) ActionCreate(resource *LoadBalancerConfig) (*LoadBalancerConfig, error) { - resp := &LoadBalancerConfig{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_CONFIG_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *LoadBalancerConfigClient) ActionRemove(resource *LoadBalancerConfig) (*LoadBalancerConfig, error) { - resp := &LoadBalancerConfig{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_CONFIG_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *LoadBalancerConfigClient) ActionUpdate(resource *LoadBalancerConfig) (*LoadBalancerConfig, error) { - resp := &LoadBalancerConfig{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_CONFIG_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_cookie_stickiness_policy.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_cookie_stickiness_policy.go deleted file mode 100644 index 09ebc4d7..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_cookie_stickiness_policy.go +++ /dev/null @@ -1,75 +0,0 @@ -package client - -const ( - LOAD_BALANCER_COOKIE_STICKINESS_POLICY_TYPE = "loadBalancerCookieStickinessPolicy" -) - -type LoadBalancerCookieStickinessPolicy struct { - Resource - - Cookie string `json:"cookie,omitempty"` - - Domain string `json:"domain,omitempty"` - - Indirect bool `json:"indirect,omitempty"` - - Mode string `json:"mode,omitempty"` - - Name string `json:"name,omitempty"` - - Nocache bool `json:"nocache,omitempty"` - - Postonly bool `json:"postonly,omitempty"` - -} - -type LoadBalancerCookieStickinessPolicyCollection struct { - Collection - Data []LoadBalancerCookieStickinessPolicy `json:"data,omitempty"` -} - -type LoadBalancerCookieStickinessPolicyClient struct { - rancherClient *RancherClient -} - -type LoadBalancerCookieStickinessPolicyOperations interface { - List(opts *ListOpts) 
(*LoadBalancerCookieStickinessPolicyCollection, error) - Create(opts *LoadBalancerCookieStickinessPolicy) (*LoadBalancerCookieStickinessPolicy, error) - Update(existing *LoadBalancerCookieStickinessPolicy, updates interface{}) (*LoadBalancerCookieStickinessPolicy, error) - ById(id string) (*LoadBalancerCookieStickinessPolicy, error) - Delete(container *LoadBalancerCookieStickinessPolicy) error -} - -func newLoadBalancerCookieStickinessPolicyClient(rancherClient *RancherClient) *LoadBalancerCookieStickinessPolicyClient { - return &LoadBalancerCookieStickinessPolicyClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerCookieStickinessPolicyClient) Create(container *LoadBalancerCookieStickinessPolicy) (*LoadBalancerCookieStickinessPolicy, error) { - resp := &LoadBalancerCookieStickinessPolicy{} - err := c.rancherClient.doCreate(LOAD_BALANCER_COOKIE_STICKINESS_POLICY_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerCookieStickinessPolicyClient) Update(existing *LoadBalancerCookieStickinessPolicy, updates interface{}) (*LoadBalancerCookieStickinessPolicy, error) { - resp := &LoadBalancerCookieStickinessPolicy{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_COOKIE_STICKINESS_POLICY_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerCookieStickinessPolicyClient) List(opts *ListOpts) (*LoadBalancerCookieStickinessPolicyCollection, error) { - resp := &LoadBalancerCookieStickinessPolicyCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_COOKIE_STICKINESS_POLICY_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerCookieStickinessPolicyClient) ById(id string) (*LoadBalancerCookieStickinessPolicy, error) { - resp := &LoadBalancerCookieStickinessPolicy{} - err := c.rancherClient.doById(LOAD_BALANCER_COOKIE_STICKINESS_POLICY_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerCookieStickinessPolicyClient) Delete(container *LoadBalancerCookieStickinessPolicy) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_COOKIE_STICKINESS_POLICY_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_health_check.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_health_check.go deleted file mode 100644 index edf5b6df..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_health_check.go +++ /dev/null @@ -1,73 +0,0 @@ -package client - -const ( - LOAD_BALANCER_HEALTH_CHECK_TYPE = "loadBalancerHealthCheck" -) - -type LoadBalancerHealthCheck struct { - Resource - - HealthyThreshold int `json:"healthyThreshold,omitempty"` - - Interval int `json:"interval,omitempty"` - - Name string `json:"name,omitempty"` - - ResponseTimeout int `json:"responseTimeout,omitempty"` - - UnhealthyThreshold int `json:"unhealthyThreshold,omitempty"` - - Uri string `json:"uri,omitempty"` - -} - -type LoadBalancerHealthCheckCollection struct { - Collection - Data []LoadBalancerHealthCheck `json:"data,omitempty"` -} - -type LoadBalancerHealthCheckClient struct { - rancherClient *RancherClient -} - -type LoadBalancerHealthCheckOperations interface { - List(opts *ListOpts) (*LoadBalancerHealthCheckCollection, error) - Create(opts *LoadBalancerHealthCheck) (*LoadBalancerHealthCheck, error) - Update(existing *LoadBalancerHealthCheck, updates interface{}) (*LoadBalancerHealthCheck, error) - ById(id string) (*LoadBalancerHealthCheck, error) - Delete(container 
*LoadBalancerHealthCheck) error -} - -func newLoadBalancerHealthCheckClient(rancherClient *RancherClient) *LoadBalancerHealthCheckClient { - return &LoadBalancerHealthCheckClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerHealthCheckClient) Create(container *LoadBalancerHealthCheck) (*LoadBalancerHealthCheck, error) { - resp := &LoadBalancerHealthCheck{} - err := c.rancherClient.doCreate(LOAD_BALANCER_HEALTH_CHECK_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerHealthCheckClient) Update(existing *LoadBalancerHealthCheck, updates interface{}) (*LoadBalancerHealthCheck, error) { - resp := &LoadBalancerHealthCheck{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_HEALTH_CHECK_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerHealthCheckClient) List(opts *ListOpts) (*LoadBalancerHealthCheckCollection, error) { - resp := &LoadBalancerHealthCheckCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_HEALTH_CHECK_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerHealthCheckClient) ById(id string) (*LoadBalancerHealthCheck, error) { - resp := &LoadBalancerHealthCheck{} - err := c.rancherClient.doById(LOAD_BALANCER_HEALTH_CHECK_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerHealthCheckClient) Delete(container *LoadBalancerHealthCheck) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_HEALTH_CHECK_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_listener.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_listener.go deleted file mode 100644 index 95f0a65e..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_listener.go +++ /dev/null @@ -1,111 +0,0 @@ -package client - -const ( - LOAD_BALANCER_LISTENER_TYPE = "loadBalancerListener" -) - -type LoadBalancerListener struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Algorithm string `json:"algorithm,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - SourcePort int `json:"sourcePort,omitempty"` - - SourceProtocol string `json:"sourceProtocol,omitempty"` - - State string `json:"state,omitempty"` - - TargetPort int `json:"targetPort,omitempty"` - - TargetProtocol string `json:"targetProtocol,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type LoadBalancerListenerCollection struct { - Collection - Data []LoadBalancerListener `json:"data,omitempty"` -} - -type LoadBalancerListenerClient struct { - rancherClient *RancherClient -} - -type LoadBalancerListenerOperations interface { - List(opts *ListOpts) (*LoadBalancerListenerCollection, error) - Create(opts *LoadBalancerListener) (*LoadBalancerListener, error) - Update(existing *LoadBalancerListener, updates interface{}) (*LoadBalancerListener, error) - ById(id string) (*LoadBalancerListener, error) - Delete(container *LoadBalancerListener) error - ActionCreate (*LoadBalancerListener) 
(*LoadBalancerListener, error) - ActionRemove (*LoadBalancerListener) (*LoadBalancerListener, error) -} - -func newLoadBalancerListenerClient(rancherClient *RancherClient) *LoadBalancerListenerClient { - return &LoadBalancerListenerClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerListenerClient) Create(container *LoadBalancerListener) (*LoadBalancerListener, error) { - resp := &LoadBalancerListener{} - err := c.rancherClient.doCreate(LOAD_BALANCER_LISTENER_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerListenerClient) Update(existing *LoadBalancerListener, updates interface{}) (*LoadBalancerListener, error) { - resp := &LoadBalancerListener{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_LISTENER_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerListenerClient) List(opts *ListOpts) (*LoadBalancerListenerCollection, error) { - resp := &LoadBalancerListenerCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_LISTENER_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerListenerClient) ById(id string) (*LoadBalancerListener, error) { - resp := &LoadBalancerListener{} - err := c.rancherClient.doById(LOAD_BALANCER_LISTENER_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerListenerClient) Delete(container *LoadBalancerListener) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_LISTENER_TYPE, &container.Resource) -} - -func (c *LoadBalancerListenerClient) ActionCreate(resource *LoadBalancerListener) (*LoadBalancerListener, error) { - resp := &LoadBalancerListener{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_LISTENER_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *LoadBalancerListenerClient) ActionRemove(resource *LoadBalancerListener) (*LoadBalancerListener, error) { - resp := &LoadBalancerListener{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_LISTENER_TYPE, "remove", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_policy.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_policy.go deleted file mode 100644 index 40f79e74..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_policy.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - LOAD_BALANCER_POLICY_TYPE = "loadBalancerPolicy" -) - -type LoadBalancerPolicy struct { - Resource - - Name string `json:"name,omitempty"` - -} - -type LoadBalancerPolicyCollection struct { - Collection - Data []LoadBalancerPolicy `json:"data,omitempty"` -} - -type LoadBalancerPolicyClient struct { - rancherClient *RancherClient -} - -type LoadBalancerPolicyOperations interface { - List(opts *ListOpts) (*LoadBalancerPolicyCollection, error) - Create(opts *LoadBalancerPolicy) (*LoadBalancerPolicy, error) - Update(existing *LoadBalancerPolicy, updates interface{}) (*LoadBalancerPolicy, error) - ById(id string) (*LoadBalancerPolicy, error) - Delete(container *LoadBalancerPolicy) error -} - -func newLoadBalancerPolicyClient(rancherClient *RancherClient) *LoadBalancerPolicyClient { - return &LoadBalancerPolicyClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerPolicyClient) Create(container *LoadBalancerPolicy) (*LoadBalancerPolicy, error) { - resp := &LoadBalancerPolicy{} - err := c.rancherClient.doCreate(LOAD_BALANCER_POLICY_TYPE, container, resp) - return resp, err -} - -func (c 
*LoadBalancerPolicyClient) Update(existing *LoadBalancerPolicy, updates interface{}) (*LoadBalancerPolicy, error) { - resp := &LoadBalancerPolicy{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_POLICY_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *LoadBalancerPolicyClient) List(opts *ListOpts) (*LoadBalancerPolicyCollection, error) { - resp := &LoadBalancerPolicyCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_POLICY_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerPolicyClient) ById(id string) (*LoadBalancerPolicy, error) { - resp := &LoadBalancerPolicy{} - err := c.rancherClient.doById(LOAD_BALANCER_POLICY_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerPolicyClient) Delete(container *LoadBalancerPolicy) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_POLICY_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_target.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_target.go deleted file mode 100644 index 75334c9d..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_load_balancer_target.go +++ /dev/null @@ -1,105 +0,0 @@ -package client - -const ( - LOAD_BALANCER_TARGET_TYPE = "loadBalancerTarget" -) - -type LoadBalancerTarget struct { - Resource - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - InstanceId string `json:"instanceId,omitempty"` - - IpAddress string `json:"ipAddress,omitempty"` - - Kind string `json:"kind,omitempty"` - - LoadBalancerId string `json:"loadBalancerId,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type LoadBalancerTargetCollection struct { - Collection - Data []LoadBalancerTarget `json:"data,omitempty"` -} - -type LoadBalancerTargetClient struct { - rancherClient *RancherClient -} - -type LoadBalancerTargetOperations interface { - List(opts *ListOpts) (*LoadBalancerTargetCollection, error) - Create(opts *LoadBalancerTarget) (*LoadBalancerTarget, error) - Update(existing *LoadBalancerTarget, updates interface{}) (*LoadBalancerTarget, error) - ById(id string) (*LoadBalancerTarget, error) - Delete(container *LoadBalancerTarget) error - ActionCreate (*LoadBalancerTarget) (*LoadBalancerTarget, error) - ActionRemove (*LoadBalancerTarget) (*LoadBalancerTarget, error) -} - -func newLoadBalancerTargetClient(rancherClient *RancherClient) *LoadBalancerTargetClient { - return &LoadBalancerTargetClient{ - rancherClient: rancherClient, - } -} - -func (c *LoadBalancerTargetClient) Create(container *LoadBalancerTarget) (*LoadBalancerTarget, error) { - resp := &LoadBalancerTarget{} - err := c.rancherClient.doCreate(LOAD_BALANCER_TARGET_TYPE, container, resp) - return resp, err -} - -func (c *LoadBalancerTargetClient) Update(existing *LoadBalancerTarget, updates interface{}) (*LoadBalancerTarget, error) { - resp := &LoadBalancerTarget{} - err := c.rancherClient.doUpdate(LOAD_BALANCER_TARGET_TYPE, &existing.Resource, updates, resp) - return resp, err -} - 
-func (c *LoadBalancerTargetClient) List(opts *ListOpts) (*LoadBalancerTargetCollection, error) { - resp := &LoadBalancerTargetCollection{} - err := c.rancherClient.doList(LOAD_BALANCER_TARGET_TYPE, opts, resp) - return resp, err -} - -func (c *LoadBalancerTargetClient) ById(id string) (*LoadBalancerTarget, error) { - resp := &LoadBalancerTarget{} - err := c.rancherClient.doById(LOAD_BALANCER_TARGET_TYPE, id, resp) - return resp, err -} - -func (c *LoadBalancerTargetClient) Delete(container *LoadBalancerTarget) error { - return c.rancherClient.doResourceDelete(LOAD_BALANCER_TARGET_TYPE, &container.Resource) -} - -func (c *LoadBalancerTargetClient) ActionCreate(resource *LoadBalancerTarget) (*LoadBalancerTarget, error) { - resp := &LoadBalancerTarget{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_TARGET_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *LoadBalancerTargetClient) ActionRemove(resource *LoadBalancerTarget) (*LoadBalancerTarget, error) { - resp := &LoadBalancerTarget{} - err := c.rancherClient.doEmptyAction(LOAD_BALANCER_TARGET_TYPE, "remove", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_machine.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_machine.go deleted file mode 100644 index 7ed043f5..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_machine.go +++ /dev/null @@ -1,129 +0,0 @@ -package client - -const ( - MACHINE_TYPE = "machine" -) - -type Machine struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - AuthCertificateAuthority string `json:"authCertificateAuthority,omitempty"` - - AuthKey string `json:"authKey,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - DigitaloceanConfig DigitaloceanConfig `json:"digitaloceanConfig,omitempty"` - - Driver string `json:"driver,omitempty"` - - ExternalId string `json:"externalId,omitempty"` - - ExtractedConfig string `json:"extractedConfig,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - - VirtualboxConfig VirtualboxConfig `json:"virtualboxConfig,omitempty"` - -} - -type MachineCollection struct { - Collection - Data []Machine `json:"data,omitempty"` -} - -type MachineClient struct { - rancherClient *RancherClient -} - -type MachineOperations interface { - List(opts *ListOpts) (*MachineCollection, error) - Create(opts *Machine) (*Machine, error) - Update(existing *Machine, updates interface{}) (*Machine, error) - ById(id string) (*Machine, error) - Delete(container *Machine) error - ActionBootstrap (*Machine) (*PhysicalHost, error) - ActionCreate (*Machine) (*PhysicalHost, error) - ActionRemove (*Machine) (*PhysicalHost, error) - ActionUpdate (*Machine) (*PhysicalHost, error) -} - -func newMachineClient(rancherClient *RancherClient) *MachineClient { - return &MachineClient{ - rancherClient: rancherClient, - } -} - -func (c *MachineClient) Create(container *Machine) (*Machine, error) { - resp := 
&Machine{} - err := c.rancherClient.doCreate(MACHINE_TYPE, container, resp) - return resp, err -} - -func (c *MachineClient) Update(existing *Machine, updates interface{}) (*Machine, error) { - resp := &Machine{} - err := c.rancherClient.doUpdate(MACHINE_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *MachineClient) List(opts *ListOpts) (*MachineCollection, error) { - resp := &MachineCollection{} - err := c.rancherClient.doList(MACHINE_TYPE, opts, resp) - return resp, err -} - -func (c *MachineClient) ById(id string) (*Machine, error) { - resp := &Machine{} - err := c.rancherClient.doById(MACHINE_TYPE, id, resp) - return resp, err -} - -func (c *MachineClient) Delete(container *Machine) error { - return c.rancherClient.doResourceDelete(MACHINE_TYPE, &container.Resource) -} - -func (c *MachineClient) ActionBootstrap(resource *Machine) (*PhysicalHost, error) { - resp := &PhysicalHost{} - err := c.rancherClient.doEmptyAction(MACHINE_TYPE, "bootstrap", &resource.Resource, resp) - return resp, err -} - -func (c *MachineClient) ActionCreate(resource *Machine) (*PhysicalHost, error) { - resp := &PhysicalHost{} - err := c.rancherClient.doEmptyAction(MACHINE_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *MachineClient) ActionRemove(resource *Machine) (*PhysicalHost, error) { - resp := &PhysicalHost{} - err := c.rancherClient.doEmptyAction(MACHINE_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *MachineClient) ActionUpdate(resource *Machine) (*PhysicalHost, error) { - resp := &PhysicalHost{} - err := c.rancherClient.doEmptyAction(MACHINE_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_mount.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_mount.go deleted file mode 100644 index d2c05d2d..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_mount.go +++ /dev/null @@ -1,144 +0,0 @@ -package client - -const ( - MOUNT_TYPE = "mount" -) - -type Mount struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - InstanceId string `json:"instanceId,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - Path string `json:"path,omitempty"` - - Permissions string `json:"permissions,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - - VolumeId string `json:"volumeId,omitempty"` - -} - -type MountCollection struct { - Collection - Data []Mount `json:"data,omitempty"` -} - -type MountClient struct { - rancherClient *RancherClient -} - -type MountOperations interface { - List(opts *ListOpts) (*MountCollection, error) - Create(opts *Mount) (*Mount, error) - Update(existing *Mount, updates interface{}) (*Mount, error) - ById(id string) (*Mount, error) - Delete(container *Mount) error - ActionActivate (*Mount) (*Mount, error) - ActionCreate (*Mount) (*Mount, error) - ActionDeactivate (*Mount) (*Mount, error) - ActionPurge (*Mount) 
(*Mount, error) - ActionRemove (*Mount) (*Mount, error) - ActionRestore (*Mount) (*Mount, error) - ActionUpdate (*Mount) (*Mount, error) -} - -func newMountClient(rancherClient *RancherClient) *MountClient { - return &MountClient{ - rancherClient: rancherClient, - } -} - -func (c *MountClient) Create(container *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doCreate(MOUNT_TYPE, container, resp) - return resp, err -} - -func (c *MountClient) Update(existing *Mount, updates interface{}) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doUpdate(MOUNT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *MountClient) List(opts *ListOpts) (*MountCollection, error) { - resp := &MountCollection{} - err := c.rancherClient.doList(MOUNT_TYPE, opts, resp) - return resp, err -} - -func (c *MountClient) ById(id string) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doById(MOUNT_TYPE, id, resp) - return resp, err -} - -func (c *MountClient) Delete(container *Mount) error { - return c.rancherClient.doResourceDelete(MOUNT_TYPE, &container.Resource) -} - -func (c *MountClient) ActionActivate(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *MountClient) ActionCreate(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *MountClient) ActionDeactivate(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *MountClient) ActionPurge(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *MountClient) ActionRemove(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *MountClient) ActionRestore(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *MountClient) ActionUpdate(resource *Mount) (*Mount, error) { - resp := &Mount{} - err := c.rancherClient.doEmptyAction(MOUNT_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_network.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_network.go deleted file mode 100644 index b950233b..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_network.go +++ /dev/null @@ -1,136 +0,0 @@ -package client - -const ( - NETWORK_TYPE = "network" -) - -type Network struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - 
TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type NetworkCollection struct {
-	Collection
-	Data []Network `json:"data,omitempty"`
-}
-
-type NetworkClient struct {
-	rancherClient *RancherClient
-}
-
-type NetworkOperations interface {
-	List(opts *ListOpts) (*NetworkCollection, error)
-	Create(opts *Network) (*Network, error)
-	Update(existing *Network, updates interface{}) (*Network, error)
-	ById(id string) (*Network, error)
-	Delete(container *Network) error
-	ActionActivate (*Network) (*Network, error)
-	ActionCreate (*Network) (*Network, error)
-	ActionDeactivate (*Network) (*Network, error)
-	ActionPurge (*Network) (*Network, error)
-	ActionRemove (*Network) (*Network, error)
-	ActionRestore (*Network) (*Network, error)
-	ActionUpdate (*Network) (*Network, error)
-}
-
-func newNetworkClient(rancherClient *RancherClient) *NetworkClient {
-	return &NetworkClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *NetworkClient) Create(container *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doCreate(NETWORK_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) Update(existing *Network, updates interface{}) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doUpdate(NETWORK_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) List(opts *ListOpts) (*NetworkCollection, error) {
-	resp := &NetworkCollection{}
-	err := c.rancherClient.doList(NETWORK_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ById(id string) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doById(NETWORK_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) Delete(container *Network) error {
-	return c.rancherClient.doResourceDelete(NETWORK_TYPE, &container.Resource)
-}
-
-func (c *NetworkClient) ActionActivate(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ActionCreate(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ActionDeactivate(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ActionPurge(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ActionRemove(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ActionRestore(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *NetworkClient) ActionUpdate(resource *Network) (*Network, error) {
-	resp := &Network{}
-	err := c.rancherClient.doEmptyAction(NETWORK_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_physical_host.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_physical_host.go
deleted file mode 100644
index 9243d42d..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_physical_host.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package client
-
-const (
-	PHYSICAL_HOST_TYPE = "physicalHost"
-)
-
-type PhysicalHost struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type PhysicalHostCollection struct {
-	Collection
-	Data []PhysicalHost `json:"data,omitempty"`
-}
-
-type PhysicalHostClient struct {
-	rancherClient *RancherClient
-}
-
-type PhysicalHostOperations interface {
-	List(opts *ListOpts) (*PhysicalHostCollection, error)
-	Create(opts *PhysicalHost) (*PhysicalHost, error)
-	Update(existing *PhysicalHost, updates interface{}) (*PhysicalHost, error)
-	ById(id string) (*PhysicalHost, error)
-	Delete(container *PhysicalHost) error
-	ActionBootstrap (*PhysicalHost) (*PhysicalHost, error)
-	ActionCreate (*PhysicalHost) (*PhysicalHost, error)
-	ActionRemove (*PhysicalHost) (*PhysicalHost, error)
-	ActionUpdate (*PhysicalHost) (*PhysicalHost, error)
-}
-
-func newPhysicalHostClient(rancherClient *RancherClient) *PhysicalHostClient {
-	return &PhysicalHostClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *PhysicalHostClient) Create(container *PhysicalHost) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doCreate(PHYSICAL_HOST_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) Update(existing *PhysicalHost, updates interface{}) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doUpdate(PHYSICAL_HOST_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) List(opts *ListOpts) (*PhysicalHostCollection, error) {
-	resp := &PhysicalHostCollection{}
-	err := c.rancherClient.doList(PHYSICAL_HOST_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) ById(id string) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doById(PHYSICAL_HOST_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) Delete(container *PhysicalHost) error {
-	return c.rancherClient.doResourceDelete(PHYSICAL_HOST_TYPE, &container.Resource)
-}
-
-func (c *PhysicalHostClient) ActionBootstrap(resource *PhysicalHost) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doEmptyAction(PHYSICAL_HOST_TYPE, "bootstrap", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) ActionCreate(resource *PhysicalHost) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doEmptyAction(PHYSICAL_HOST_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) ActionRemove(resource *PhysicalHost) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doEmptyAction(PHYSICAL_HOST_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PhysicalHostClient) ActionUpdate(resource *PhysicalHost) (*PhysicalHost, error) {
-	resp := &PhysicalHost{}
-	err := c.rancherClient.doEmptyAction(PHYSICAL_HOST_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_port.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_port.go
deleted file mode 100644
index bb6391ab..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_port.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package client
-
-const (
-	PORT_TYPE = "port"
-)
-
-type Port struct {
-	Resource
-
-	AccountId string `json:"accountId,omitempty"`
-
-	Created string `json:"created,omitempty"`
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	Description string `json:"description,omitempty"`
-
-	InstanceId string `json:"instanceId,omitempty"`
-
-	Kind string `json:"kind,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	PrivateIpAddressId string `json:"privateIpAddressId,omitempty"`
-
-	PrivatePort int `json:"privatePort,omitempty"`
-
-	Protocol string `json:"protocol,omitempty"`
-
-	PublicIpAddressId string `json:"publicIpAddressId,omitempty"`
-
-	PublicPort int `json:"publicPort,omitempty"`
-
-	RemoveTime string `json:"removeTime,omitempty"`
-
-	Removed string `json:"removed,omitempty"`
-
-	State string `json:"state,omitempty"`
-
-	Transitioning string `json:"transitioning,omitempty"`
-
-	TransitioningMessage string `json:"transitioningMessage,omitempty"`
-
-	TransitioningProgress int `json:"transitioningProgress,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type PortCollection struct {
-	Collection
-	Data []Port `json:"data,omitempty"`
-}
-
-type PortClient struct {
-	rancherClient *RancherClient
-}
-
-type PortOperations interface {
-	List(opts *ListOpts) (*PortCollection, error)
-	Create(opts *Port) (*Port, error)
-	Update(existing *Port, updates interface{}) (*Port, error)
-	ById(id string) (*Port, error)
-	Delete(container *Port) error
-	ActionActivate (*Port) (*Port, error)
-	ActionCreate (*Port) (*Port, error)
-	ActionDeactivate (*Port) (*Port, error)
-	ActionPurge (*Port) (*Port, error)
-	ActionRemove (*Port) (*Port, error)
-	ActionRestore (*Port) (*Port, error)
-	ActionUpdate (*Port) (*Port, error)
-}
-
-func newPortClient(rancherClient *RancherClient) *PortClient {
-	return &PortClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *PortClient) Create(container *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doCreate(PORT_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *PortClient) Update(existing *Port, updates interface{}) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doUpdate(PORT_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *PortClient) List(opts *ListOpts) (*PortCollection, error) {
-	resp := &PortCollection{}
-	err := c.rancherClient.doList(PORT_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *PortClient) ById(id string) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doById(PORT_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *PortClient) Delete(container *Port) error {
-	return c.rancherClient.doResourceDelete(PORT_TYPE, &container.Resource)
-}
-
-func (c *PortClient) ActionActivate(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "activate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PortClient) ActionCreate(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "create", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PortClient) ActionDeactivate(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "deactivate", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PortClient) ActionPurge(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "purge", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PortClient) ActionRemove(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "remove", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PortClient) ActionRestore(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "restore", &resource.Resource, resp)
-	return resp, err
-}
-
-func (c *PortClient) ActionUpdate(resource *Port) (*Port, error) {
-	resp := &Port{}
-	err := c.rancherClient.doEmptyAction(PORT_TYPE, "update", &resource.Resource, resp)
-	return resp, err
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_definition.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_definition.go
deleted file mode 100644
index b24b8e5f..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_definition.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package client
-
-const (
-	PROCESS_DEFINITION_TYPE = "processDefinition"
-)
-
-type ProcessDefinition struct {
-	Resource
-
-	ExtensionBased bool `json:"extensionBased,omitempty"`
-
-	Name string `json:"name,omitempty"`
-
-	PostProcessListeners interface{} `json:"postProcessListeners,omitempty"`
-
-	PreProcessListeners interface{} `json:"preProcessListeners,omitempty"`
-
-	ProcessHandlers interface{} `json:"processHandlers,omitempty"`
-
-	ResourceType string `json:"resourceType,omitempty"`
-
-}
-
-type ProcessDefinitionCollection struct {
-	Collection
-	Data []ProcessDefinition `json:"data,omitempty"`
-}
-
-type ProcessDefinitionClient struct {
-	rancherClient *RancherClient
-}
-
-type ProcessDefinitionOperations interface {
-	List(opts *ListOpts) (*ProcessDefinitionCollection, error)
-	Create(opts *ProcessDefinition) (*ProcessDefinition, error)
-	Update(existing *ProcessDefinition, updates interface{}) (*ProcessDefinition, error)
-	ById(id string) (*ProcessDefinition, error)
-	Delete(container *ProcessDefinition) error
-}
-
-func newProcessDefinitionClient(rancherClient *RancherClient) *ProcessDefinitionClient {
-	return &ProcessDefinitionClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *ProcessDefinitionClient) Create(container *ProcessDefinition) (*ProcessDefinition, error) {
-	resp := &ProcessDefinition{}
-	err := c.rancherClient.doCreate(PROCESS_DEFINITION_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *ProcessDefinitionClient) Update(existing *ProcessDefinition, updates interface{}) (*ProcessDefinition, error) {
-	resp := &ProcessDefinition{}
-	err := c.rancherClient.doUpdate(PROCESS_DEFINITION_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *ProcessDefinitionClient) List(opts *ListOpts) (*ProcessDefinitionCollection, error) {
-	resp := &ProcessDefinitionCollection{}
-	err := c.rancherClient.doList(PROCESS_DEFINITION_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *ProcessDefinitionClient) ById(id string) (*ProcessDefinition, error) {
-	resp := &ProcessDefinition{}
-	err := c.rancherClient.doById(PROCESS_DEFINITION_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *ProcessDefinitionClient) Delete(container *ProcessDefinition) error {
-	return c.rancherClient.doResourceDelete(PROCESS_DEFINITION_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_execution.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_execution.go
deleted file mode 100644
index 4c90334f..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_execution.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package client
-
-const (
-	PROCESS_EXECUTION_TYPE = "processExecution"
-)
-
-type ProcessExecution struct {
-	Resource
-
-	Log map[string]interface{} `json:"log,omitempty"`
-
-	ProcessInstanceId string `json:"processInstanceId,omitempty"`
-
-	Uuid string `json:"uuid,omitempty"`
-
-}
-
-type ProcessExecutionCollection struct {
-	Collection
-	Data []ProcessExecution `json:"data,omitempty"`
-}
-
-type ProcessExecutionClient struct {
-	rancherClient *RancherClient
-}
-
-type ProcessExecutionOperations interface {
-	List(opts *ListOpts) (*ProcessExecutionCollection, error)
-	Create(opts *ProcessExecution) (*ProcessExecution, error)
-	Update(existing *ProcessExecution, updates interface{}) (*ProcessExecution, error)
-	ById(id string) (*ProcessExecution, error)
-	Delete(container *ProcessExecution) error
-}
-
-func newProcessExecutionClient(rancherClient *RancherClient) *ProcessExecutionClient {
-	return &ProcessExecutionClient{
-		rancherClient: rancherClient,
-	}
-}
-
-func (c *ProcessExecutionClient) Create(container *ProcessExecution) (*ProcessExecution, error) {
-	resp := &ProcessExecution{}
-	err := c.rancherClient.doCreate(PROCESS_EXECUTION_TYPE, container, resp)
-	return resp, err
-}
-
-func (c *ProcessExecutionClient) Update(existing *ProcessExecution, updates interface{}) (*ProcessExecution, error) {
-	resp := &ProcessExecution{}
-	err := c.rancherClient.doUpdate(PROCESS_EXECUTION_TYPE, &existing.Resource, updates, resp)
-	return resp, err
-}
-
-func (c *ProcessExecutionClient) List(opts *ListOpts) (*ProcessExecutionCollection, error) {
-	resp := &ProcessExecutionCollection{}
-	err := c.rancherClient.doList(PROCESS_EXECUTION_TYPE, opts, resp)
-	return resp, err
-}
-
-func (c *ProcessExecutionClient) ById(id string) (*ProcessExecution, error) {
-	resp := &ProcessExecution{}
-	err := c.rancherClient.doById(PROCESS_EXECUTION_TYPE, id, resp)
-	return resp, err
-}
-
-func (c *ProcessExecutionClient) Delete(container *ProcessExecution) error {
-	return c.rancherClient.doResourceDelete(PROCESS_EXECUTION_TYPE, &container.Resource)
-}
diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_instance.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_instance.go
deleted file mode 100644
index e505f82e..00000000
--- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_process_instance.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package client
-
-const (
-	PROCESS_INSTANCE_TYPE = "processInstance"
-)
-
-type ProcessInstance struct {
-	Resource
-
-	Data map[string]interface{} `json:"data,omitempty"`
-
-	EndTime string `json:"endTime,omitempty"`
-
-	ExitReason string `json:"exitReason,omitempty"`
-
-	Phase string `json:"phase,omitempty"`
-
-	Priority int `json:"priority,omitempty"`
- - ProcessName string `json:"processName,omitempty"` - - ResourceId string `json:"resourceId,omitempty"` - - ResourceType string `json:"resourceType,omitempty"` - - Result string `json:"result,omitempty"` - - RunningProcessServerId string `json:"runningProcessServerId,omitempty"` - - StartProcessServerId string `json:"startProcessServerId,omitempty"` - - StartTime string `json:"startTime,omitempty"` - -} - -type ProcessInstanceCollection struct { - Collection - Data []ProcessInstance `json:"data,omitempty"` -} - -type ProcessInstanceClient struct { - rancherClient *RancherClient -} - -type ProcessInstanceOperations interface { - List(opts *ListOpts) (*ProcessInstanceCollection, error) - Create(opts *ProcessInstance) (*ProcessInstance, error) - Update(existing *ProcessInstance, updates interface{}) (*ProcessInstance, error) - ById(id string) (*ProcessInstance, error) - Delete(container *ProcessInstance) error -} - -func newProcessInstanceClient(rancherClient *RancherClient) *ProcessInstanceClient { - return &ProcessInstanceClient{ - rancherClient: rancherClient, - } -} - -func (c *ProcessInstanceClient) Create(container *ProcessInstance) (*ProcessInstance, error) { - resp := &ProcessInstance{} - err := c.rancherClient.doCreate(PROCESS_INSTANCE_TYPE, container, resp) - return resp, err -} - -func (c *ProcessInstanceClient) Update(existing *ProcessInstance, updates interface{}) (*ProcessInstance, error) { - resp := &ProcessInstance{} - err := c.rancherClient.doUpdate(PROCESS_INSTANCE_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ProcessInstanceClient) List(opts *ListOpts) (*ProcessInstanceCollection, error) { - resp := &ProcessInstanceCollection{} - err := c.rancherClient.doList(PROCESS_INSTANCE_TYPE, opts, resp) - return resp, err -} - -func (c *ProcessInstanceClient) ById(id string) (*ProcessInstance, error) { - resp := &ProcessInstance{} - err := c.rancherClient.doById(PROCESS_INSTANCE_TYPE, id, resp) - return resp, err -} - -func (c *ProcessInstanceClient) Delete(container *ProcessInstance) error { - return c.rancherClient.doResourceDelete(PROCESS_INSTANCE_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_project.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_project.go deleted file mode 100644 index 69e3a999..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_project.go +++ /dev/null @@ -1,89 +0,0 @@ -package client - -const ( - PROJECT_TYPE = "project" -) - -type Project struct { - Resource - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - ExternalId string `json:"externalId,omitempty"` - - ExternalIdType string `json:"externalIdType,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type ProjectCollection struct { - Collection - Data []Project `json:"data,omitempty"` -} - -type ProjectClient struct { - rancherClient *RancherClient -} - -type ProjectOperations interface { - List(opts *ListOpts) 
(*ProjectCollection, error) - Create(opts *Project) (*Project, error) - Update(existing *Project, updates interface{}) (*Project, error) - ById(id string) (*Project, error) - Delete(container *Project) error -} - -func newProjectClient(rancherClient *RancherClient) *ProjectClient { - return &ProjectClient{ - rancherClient: rancherClient, - } -} - -func (c *ProjectClient) Create(container *Project) (*Project, error) { - resp := &Project{} - err := c.rancherClient.doCreate(PROJECT_TYPE, container, resp) - return resp, err -} - -func (c *ProjectClient) Update(existing *Project, updates interface{}) (*Project, error) { - resp := &Project{} - err := c.rancherClient.doUpdate(PROJECT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ProjectClient) List(opts *ListOpts) (*ProjectCollection, error) { - resp := &ProjectCollection{} - err := c.rancherClient.doList(PROJECT_TYPE, opts, resp) - return resp, err -} - -func (c *ProjectClient) ById(id string) (*Project, error) { - resp := &Project{} - err := c.rancherClient.doById(PROJECT_TYPE, id, resp) - return resp, err -} - -func (c *ProjectClient) Delete(container *Project) error { - return c.rancherClient.doResourceDelete(PROJECT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_publish.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_publish.go deleted file mode 100644 index d5be12df..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_publish.go +++ /dev/null @@ -1,83 +0,0 @@ -package client - -const ( - PUBLISH_TYPE = "publish" -) - -type Publish struct { - Resource - - Data map[string]interface{} `json:"data,omitempty"` - - Name string `json:"name,omitempty"` - - PreviousIds []string `json:"previousIds,omitempty"` - - Publisher string `json:"publisher,omitempty"` - - ResourceId string `json:"resourceId,omitempty"` - - ResourceType string `json:"resourceType,omitempty"` - - Time int `json:"time,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningInternalMessage string `json:"transitioningInternalMessage,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - -} - -type PublishCollection struct { - Collection - Data []Publish `json:"data,omitempty"` -} - -type PublishClient struct { - rancherClient *RancherClient -} - -type PublishOperations interface { - List(opts *ListOpts) (*PublishCollection, error) - Create(opts *Publish) (*Publish, error) - Update(existing *Publish, updates interface{}) (*Publish, error) - ById(id string) (*Publish, error) - Delete(container *Publish) error -} - -func newPublishClient(rancherClient *RancherClient) *PublishClient { - return &PublishClient{ - rancherClient: rancherClient, - } -} - -func (c *PublishClient) Create(container *Publish) (*Publish, error) { - resp := &Publish{} - err := c.rancherClient.doCreate(PUBLISH_TYPE, container, resp) - return resp, err -} - -func (c *PublishClient) Update(existing *Publish, updates interface{}) (*Publish, error) { - resp := &Publish{} - err := c.rancherClient.doUpdate(PUBLISH_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *PublishClient) List(opts *ListOpts) (*PublishCollection, error) { - resp := &PublishCollection{} - err := c.rancherClient.doList(PUBLISH_TYPE, opts, resp) - return resp, err -} - -func (c *PublishClient) ById(id string) (*Publish, error) { - resp := 
&Publish{} - err := c.rancherClient.doById(PUBLISH_TYPE, id, resp) - return resp, err -} - -func (c *PublishClient) Delete(container *Publish) error { - return c.rancherClient.doResourceDelete(PUBLISH_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_register.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_register.go deleted file mode 100644 index 9e22fa67..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_register.go +++ /dev/null @@ -1,93 +0,0 @@ -package client - -const ( - REGISTER_TYPE = "register" -) - -type Register struct { - Resource - - AccessKey string `json:"accessKey,omitempty"` - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Key string `json:"key,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - SecretKey string `json:"secretKey,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type RegisterCollection struct { - Collection - Data []Register `json:"data,omitempty"` -} - -type RegisterClient struct { - rancherClient *RancherClient -} - -type RegisterOperations interface { - List(opts *ListOpts) (*RegisterCollection, error) - Create(opts *Register) (*Register, error) - Update(existing *Register, updates interface{}) (*Register, error) - ById(id string) (*Register, error) - Delete(container *Register) error -} - -func newRegisterClient(rancherClient *RancherClient) *RegisterClient { - return &RegisterClient{ - rancherClient: rancherClient, - } -} - -func (c *RegisterClient) Create(container *Register) (*Register, error) { - resp := &Register{} - err := c.rancherClient.doCreate(REGISTER_TYPE, container, resp) - return resp, err -} - -func (c *RegisterClient) Update(existing *Register, updates interface{}) (*Register, error) { - resp := &Register{} - err := c.rancherClient.doUpdate(REGISTER_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *RegisterClient) List(opts *ListOpts) (*RegisterCollection, error) { - resp := &RegisterCollection{} - err := c.rancherClient.doList(REGISTER_TYPE, opts, resp) - return resp, err -} - -func (c *RegisterClient) ById(id string) (*Register, error) { - resp := &Register{} - err := c.rancherClient.doById(REGISTER_TYPE, id, resp) - return resp, err -} - -func (c *RegisterClient) Delete(container *Register) error { - return c.rancherClient.doResourceDelete(REGISTER_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registration_token.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registration_token.go deleted file mode 100644 index 2a1e4001..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registration_token.go +++ /dev/null @@ -1,136 +0,0 @@ -package client - -const ( - REGISTRATION_TOKEN_TYPE = "registrationToken" -) - -type RegistrationToken struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created 
string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type RegistrationTokenCollection struct { - Collection - Data []RegistrationToken `json:"data,omitempty"` -} - -type RegistrationTokenClient struct { - rancherClient *RancherClient -} - -type RegistrationTokenOperations interface { - List(opts *ListOpts) (*RegistrationTokenCollection, error) - Create(opts *RegistrationToken) (*RegistrationToken, error) - Update(existing *RegistrationToken, updates interface{}) (*RegistrationToken, error) - ById(id string) (*RegistrationToken, error) - Delete(container *RegistrationToken) error - ActionActivate (*RegistrationToken) (*Credential, error) - ActionCreate (*RegistrationToken) (*Credential, error) - ActionDeactivate (*RegistrationToken) (*Credential, error) - ActionPurge (*RegistrationToken) (*Credential, error) - ActionRemove (*RegistrationToken) (*Credential, error) - ActionRestore (*RegistrationToken) (*Credential, error) - ActionUpdate (*RegistrationToken) (*Credential, error) -} - -func newRegistrationTokenClient(rancherClient *RancherClient) *RegistrationTokenClient { - return &RegistrationTokenClient{ - rancherClient: rancherClient, - } -} - -func (c *RegistrationTokenClient) Create(container *RegistrationToken) (*RegistrationToken, error) { - resp := &RegistrationToken{} - err := c.rancherClient.doCreate(REGISTRATION_TOKEN_TYPE, container, resp) - return resp, err -} - -func (c *RegistrationTokenClient) Update(existing *RegistrationToken, updates interface{}) (*RegistrationToken, error) { - resp := &RegistrationToken{} - err := c.rancherClient.doUpdate(REGISTRATION_TOKEN_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *RegistrationTokenClient) List(opts *ListOpts) (*RegistrationTokenCollection, error) { - resp := &RegistrationTokenCollection{} - err := c.rancherClient.doList(REGISTRATION_TOKEN_TYPE, opts, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ById(id string) (*RegistrationToken, error) { - resp := &RegistrationToken{} - err := c.rancherClient.doById(REGISTRATION_TOKEN_TYPE, id, resp) - return resp, err -} - -func (c *RegistrationTokenClient) Delete(container *RegistrationToken) error { - return c.rancherClient.doResourceDelete(REGISTRATION_TOKEN_TYPE, &container.Resource) -} - -func (c *RegistrationTokenClient) ActionActivate(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ActionCreate(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ActionDeactivate(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "deactivate", 
&resource.Resource, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ActionPurge(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ActionRemove(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ActionRestore(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *RegistrationTokenClient) ActionUpdate(resource *RegistrationToken) (*Credential, error) { - resp := &Credential{} - err := c.rancherClient.doEmptyAction(REGISTRATION_TOKEN_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry.go deleted file mode 100644 index 1f66db86..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry.go +++ /dev/null @@ -1,89 +0,0 @@ -package client - -const ( - REGISTRY_TYPE = "registry" -) - -type Registry struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - ServerAddress string `json:"serverAddress,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type RegistryCollection struct { - Collection - Data []Registry `json:"data,omitempty"` -} - -type RegistryClient struct { - rancherClient *RancherClient -} - -type RegistryOperations interface { - List(opts *ListOpts) (*RegistryCollection, error) - Create(opts *Registry) (*Registry, error) - Update(existing *Registry, updates interface{}) (*Registry, error) - ById(id string) (*Registry, error) - Delete(container *Registry) error -} - -func newRegistryClient(rancherClient *RancherClient) *RegistryClient { - return &RegistryClient{ - rancherClient: rancherClient, - } -} - -func (c *RegistryClient) Create(container *Registry) (*Registry, error) { - resp := &Registry{} - err := c.rancherClient.doCreate(REGISTRY_TYPE, container, resp) - return resp, err -} - -func (c *RegistryClient) Update(existing *Registry, updates interface{}) (*Registry, error) { - resp := &Registry{} - err := c.rancherClient.doUpdate(REGISTRY_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *RegistryClient) List(opts *ListOpts) (*RegistryCollection, error) { - resp := &RegistryCollection{} - err := c.rancherClient.doList(REGISTRY_TYPE, opts, resp) - return resp, err -} - -func (c *RegistryClient) ById(id string) (*Registry, error) { - resp := &Registry{} - err := c.rancherClient.doById(REGISTRY_TYPE, id, resp) - return 
resp, err -} - -func (c *RegistryClient) Delete(container *Registry) error { - return c.rancherClient.doResourceDelete(REGISTRY_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry_credential.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry_credential.go deleted file mode 100644 index 6b2f7600..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_registry_credential.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -const ( - REGISTRY_CREDENTIAL_TYPE = "registryCredential" -) - -type RegistryCredential struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Email string `json:"email,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - PublicValue string `json:"publicValue,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - SecretValue string `json:"secretValue,omitempty"` - - State string `json:"state,omitempty"` - - StoragePoolId string `json:"storagePoolId,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type RegistryCredentialCollection struct { - Collection - Data []RegistryCredential `json:"data,omitempty"` -} - -type RegistryCredentialClient struct { - rancherClient *RancherClient -} - -type RegistryCredentialOperations interface { - List(opts *ListOpts) (*RegistryCredentialCollection, error) - Create(opts *RegistryCredential) (*RegistryCredential, error) - Update(existing *RegistryCredential, updates interface{}) (*RegistryCredential, error) - ById(id string) (*RegistryCredential, error) - Delete(container *RegistryCredential) error -} - -func newRegistryCredentialClient(rancherClient *RancherClient) *RegistryCredentialClient { - return &RegistryCredentialClient{ - rancherClient: rancherClient, - } -} - -func (c *RegistryCredentialClient) Create(container *RegistryCredential) (*RegistryCredential, error) { - resp := &RegistryCredential{} - err := c.rancherClient.doCreate(REGISTRY_CREDENTIAL_TYPE, container, resp) - return resp, err -} - -func (c *RegistryCredentialClient) Update(existing *RegistryCredential, updates interface{}) (*RegistryCredential, error) { - resp := &RegistryCredential{} - err := c.rancherClient.doUpdate(REGISTRY_CREDENTIAL_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *RegistryCredentialClient) List(opts *ListOpts) (*RegistryCredentialCollection, error) { - resp := &RegistryCredentialCollection{} - err := c.rancherClient.doList(REGISTRY_CREDENTIAL_TYPE, opts, resp) - return resp, err -} - -func (c *RegistryCredentialClient) ById(id string) (*RegistryCredential, error) { - resp := &RegistryCredential{} - err := c.rancherClient.doById(REGISTRY_CREDENTIAL_TYPE, id, resp) - return resp, err -} - -func (c *RegistryCredentialClient) Delete(container *RegistryCredential) error { - return c.rancherClient.doResourceDelete(REGISTRY_CREDENTIAL_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_remove_load_balancer_input.go 
b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_remove_load_balancer_input.go deleted file mode 100644 index 8d7aa71b..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_remove_load_balancer_input.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - REMOVE_LOAD_BALANCER_INPUT_TYPE = "removeLoadBalancerInput" -) - -type RemoveLoadBalancerInput struct { - Resource - - LoadBalancerId string `json:"loadBalancerId,omitempty"` - -} - -type RemoveLoadBalancerInputCollection struct { - Collection - Data []RemoveLoadBalancerInput `json:"data,omitempty"` -} - -type RemoveLoadBalancerInputClient struct { - rancherClient *RancherClient -} - -type RemoveLoadBalancerInputOperations interface { - List(opts *ListOpts) (*RemoveLoadBalancerInputCollection, error) - Create(opts *RemoveLoadBalancerInput) (*RemoveLoadBalancerInput, error) - Update(existing *RemoveLoadBalancerInput, updates interface{}) (*RemoveLoadBalancerInput, error) - ById(id string) (*RemoveLoadBalancerInput, error) - Delete(container *RemoveLoadBalancerInput) error -} - -func newRemoveLoadBalancerInputClient(rancherClient *RancherClient) *RemoveLoadBalancerInputClient { - return &RemoveLoadBalancerInputClient{ - rancherClient: rancherClient, - } -} - -func (c *RemoveLoadBalancerInputClient) Create(container *RemoveLoadBalancerInput) (*RemoveLoadBalancerInput, error) { - resp := &RemoveLoadBalancerInput{} - err := c.rancherClient.doCreate(REMOVE_LOAD_BALANCER_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *RemoveLoadBalancerInputClient) Update(existing *RemoveLoadBalancerInput, updates interface{}) (*RemoveLoadBalancerInput, error) { - resp := &RemoveLoadBalancerInput{} - err := c.rancherClient.doUpdate(REMOVE_LOAD_BALANCER_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *RemoveLoadBalancerInputClient) List(opts *ListOpts) (*RemoveLoadBalancerInputCollection, error) { - resp := &RemoveLoadBalancerInputCollection{} - err := c.rancherClient.doList(REMOVE_LOAD_BALANCER_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *RemoveLoadBalancerInputClient) ById(id string) (*RemoveLoadBalancerInput, error) { - resp := &RemoveLoadBalancerInput{} - err := c.rancherClient.doById(REMOVE_LOAD_BALANCER_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *RemoveLoadBalancerInputClient) Delete(container *RemoveLoadBalancerInput) error { - return c.rancherClient.doResourceDelete(REMOVE_LOAD_BALANCER_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_resource_definition.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_resource_definition.go deleted file mode 100644 index 7b3f2759..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_resource_definition.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - RESOURCE_DEFINITION_TYPE = "resourceDefinition" -) - -type ResourceDefinition struct { - Resource - - Name string `json:"name,omitempty"` - -} - -type ResourceDefinitionCollection struct { - Collection - Data []ResourceDefinition `json:"data,omitempty"` -} - -type ResourceDefinitionClient struct { - rancherClient *RancherClient -} - -type ResourceDefinitionOperations interface { - List(opts *ListOpts) (*ResourceDefinitionCollection, error) - Create(opts *ResourceDefinition) (*ResourceDefinition, error) - Update(existing *ResourceDefinition, updates interface{}) (*ResourceDefinition, error) - ById(id 
string) (*ResourceDefinition, error) - Delete(container *ResourceDefinition) error -} - -func newResourceDefinitionClient(rancherClient *RancherClient) *ResourceDefinitionClient { - return &ResourceDefinitionClient{ - rancherClient: rancherClient, - } -} - -func (c *ResourceDefinitionClient) Create(container *ResourceDefinition) (*ResourceDefinition, error) { - resp := &ResourceDefinition{} - err := c.rancherClient.doCreate(RESOURCE_DEFINITION_TYPE, container, resp) - return resp, err -} - -func (c *ResourceDefinitionClient) Update(existing *ResourceDefinition, updates interface{}) (*ResourceDefinition, error) { - resp := &ResourceDefinition{} - err := c.rancherClient.doUpdate(RESOURCE_DEFINITION_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *ResourceDefinitionClient) List(opts *ListOpts) (*ResourceDefinitionCollection, error) { - resp := &ResourceDefinitionCollection{} - err := c.rancherClient.doList(RESOURCE_DEFINITION_TYPE, opts, resp) - return resp, err -} - -func (c *ResourceDefinitionClient) ById(id string) (*ResourceDefinition, error) { - resp := &ResourceDefinition{} - err := c.rancherClient.doById(RESOURCE_DEFINITION_TYPE, id, resp) - return resp, err -} - -func (c *ResourceDefinitionClient) Delete(container *ResourceDefinition) error { - return c.rancherClient.doResourceDelete(RESOURCE_DEFINITION_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_restart_policy.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_restart_policy.go deleted file mode 100644 index b77e25e8..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_restart_policy.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - RESTART_POLICY_TYPE = "restartPolicy" -) - -type RestartPolicy struct { - Resource - - MaximumRetryCount int `json:"maximumRetryCount,omitempty"` - - Name string `json:"name,omitempty"` - -} - -type RestartPolicyCollection struct { - Collection - Data []RestartPolicy `json:"data,omitempty"` -} - -type RestartPolicyClient struct { - rancherClient *RancherClient -} - -type RestartPolicyOperations interface { - List(opts *ListOpts) (*RestartPolicyCollection, error) - Create(opts *RestartPolicy) (*RestartPolicy, error) - Update(existing *RestartPolicy, updates interface{}) (*RestartPolicy, error) - ById(id string) (*RestartPolicy, error) - Delete(container *RestartPolicy) error -} - -func newRestartPolicyClient(rancherClient *RancherClient) *RestartPolicyClient { - return &RestartPolicyClient{ - rancherClient: rancherClient, - } -} - -func (c *RestartPolicyClient) Create(container *RestartPolicy) (*RestartPolicy, error) { - resp := &RestartPolicy{} - err := c.rancherClient.doCreate(RESTART_POLICY_TYPE, container, resp) - return resp, err -} - -func (c *RestartPolicyClient) Update(existing *RestartPolicy, updates interface{}) (*RestartPolicy, error) { - resp := &RestartPolicy{} - err := c.rancherClient.doUpdate(RESTART_POLICY_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *RestartPolicyClient) List(opts *ListOpts) (*RestartPolicyCollection, error) { - resp := &RestartPolicyCollection{} - err := c.rancherClient.doList(RESTART_POLICY_TYPE, opts, resp) - return resp, err -} - -func (c *RestartPolicyClient) ById(id string) (*RestartPolicy, error) { - resp := &RestartPolicy{} - err := c.rancherClient.doById(RESTART_POLICY_TYPE, id, resp) - return resp, err -} - -func (c *RestartPolicyClient) Delete(container *RestartPolicy) 
error { - return c.rancherClient.doResourceDelete(RESTART_POLICY_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_hosts_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_hosts_input.go deleted file mode 100644 index 89eea5b8..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_hosts_input.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - SET_LOAD_BALANCER_HOSTS_INPUT_TYPE = "setLoadBalancerHostsInput" -) - -type SetLoadBalancerHostsInput struct { - Resource - - HostIds []string `json:"hostIds,omitempty"` - -} - -type SetLoadBalancerHostsInputCollection struct { - Collection - Data []SetLoadBalancerHostsInput `json:"data,omitempty"` -} - -type SetLoadBalancerHostsInputClient struct { - rancherClient *RancherClient -} - -type SetLoadBalancerHostsInputOperations interface { - List(opts *ListOpts) (*SetLoadBalancerHostsInputCollection, error) - Create(opts *SetLoadBalancerHostsInput) (*SetLoadBalancerHostsInput, error) - Update(existing *SetLoadBalancerHostsInput, updates interface{}) (*SetLoadBalancerHostsInput, error) - ById(id string) (*SetLoadBalancerHostsInput, error) - Delete(container *SetLoadBalancerHostsInput) error -} - -func newSetLoadBalancerHostsInputClient(rancherClient *RancherClient) *SetLoadBalancerHostsInputClient { - return &SetLoadBalancerHostsInputClient{ - rancherClient: rancherClient, - } -} - -func (c *SetLoadBalancerHostsInputClient) Create(container *SetLoadBalancerHostsInput) (*SetLoadBalancerHostsInput, error) { - resp := &SetLoadBalancerHostsInput{} - err := c.rancherClient.doCreate(SET_LOAD_BALANCER_HOSTS_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *SetLoadBalancerHostsInputClient) Update(existing *SetLoadBalancerHostsInput, updates interface{}) (*SetLoadBalancerHostsInput, error) { - resp := &SetLoadBalancerHostsInput{} - err := c.rancherClient.doUpdate(SET_LOAD_BALANCER_HOSTS_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *SetLoadBalancerHostsInputClient) List(opts *ListOpts) (*SetLoadBalancerHostsInputCollection, error) { - resp := &SetLoadBalancerHostsInputCollection{} - err := c.rancherClient.doList(SET_LOAD_BALANCER_HOSTS_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *SetLoadBalancerHostsInputClient) ById(id string) (*SetLoadBalancerHostsInput, error) { - resp := &SetLoadBalancerHostsInput{} - err := c.rancherClient.doById(SET_LOAD_BALANCER_HOSTS_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *SetLoadBalancerHostsInputClient) Delete(container *SetLoadBalancerHostsInput) error { - return c.rancherClient.doResourceDelete(SET_LOAD_BALANCER_HOSTS_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_listeners_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_listeners_input.go deleted file mode 100644 index 674c8140..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_listeners_input.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - SET_LOAD_BALANCER_LISTENERS_INPUT_TYPE = "setLoadBalancerListenersInput" -) - -type SetLoadBalancerListenersInput struct { - Resource - - LoadBalancerListenerIds []string `json:"loadBalancerListenerIds,omitempty"` - -} - -type SetLoadBalancerListenersInputCollection struct { 
- Collection - Data []SetLoadBalancerListenersInput `json:"data,omitempty"` -} - -type SetLoadBalancerListenersInputClient struct { - rancherClient *RancherClient -} - -type SetLoadBalancerListenersInputOperations interface { - List(opts *ListOpts) (*SetLoadBalancerListenersInputCollection, error) - Create(opts *SetLoadBalancerListenersInput) (*SetLoadBalancerListenersInput, error) - Update(existing *SetLoadBalancerListenersInput, updates interface{}) (*SetLoadBalancerListenersInput, error) - ById(id string) (*SetLoadBalancerListenersInput, error) - Delete(container *SetLoadBalancerListenersInput) error -} - -func newSetLoadBalancerListenersInputClient(rancherClient *RancherClient) *SetLoadBalancerListenersInputClient { - return &SetLoadBalancerListenersInputClient{ - rancherClient: rancherClient, - } -} - -func (c *SetLoadBalancerListenersInputClient) Create(container *SetLoadBalancerListenersInput) (*SetLoadBalancerListenersInput, error) { - resp := &SetLoadBalancerListenersInput{} - err := c.rancherClient.doCreate(SET_LOAD_BALANCER_LISTENERS_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *SetLoadBalancerListenersInputClient) Update(existing *SetLoadBalancerListenersInput, updates interface{}) (*SetLoadBalancerListenersInput, error) { - resp := &SetLoadBalancerListenersInput{} - err := c.rancherClient.doUpdate(SET_LOAD_BALANCER_LISTENERS_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *SetLoadBalancerListenersInputClient) List(opts *ListOpts) (*SetLoadBalancerListenersInputCollection, error) { - resp := &SetLoadBalancerListenersInputCollection{} - err := c.rancherClient.doList(SET_LOAD_BALANCER_LISTENERS_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *SetLoadBalancerListenersInputClient) ById(id string) (*SetLoadBalancerListenersInput, error) { - resp := &SetLoadBalancerListenersInput{} - err := c.rancherClient.doById(SET_LOAD_BALANCER_LISTENERS_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *SetLoadBalancerListenersInputClient) Delete(container *SetLoadBalancerListenersInput) error { - return c.rancherClient.doResourceDelete(SET_LOAD_BALANCER_LISTENERS_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_targets_input.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_targets_input.go deleted file mode 100644 index 19b2b865..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_set_load_balancer_targets_input.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - SET_LOAD_BALANCER_TARGETS_INPUT_TYPE = "setLoadBalancerTargetsInput" -) - -type SetLoadBalancerTargetsInput struct { - Resource - - InstanceIds []string `json:"instanceIds,omitempty"` - - IpAddresses []string `json:"ipAddresses,omitempty"` - -} - -type SetLoadBalancerTargetsInputCollection struct { - Collection - Data []SetLoadBalancerTargetsInput `json:"data,omitempty"` -} - -type SetLoadBalancerTargetsInputClient struct { - rancherClient *RancherClient -} - -type SetLoadBalancerTargetsInputOperations interface { - List(opts *ListOpts) (*SetLoadBalancerTargetsInputCollection, error) - Create(opts *SetLoadBalancerTargetsInput) (*SetLoadBalancerTargetsInput, error) - Update(existing *SetLoadBalancerTargetsInput, updates interface{}) (*SetLoadBalancerTargetsInput, error) - ById(id string) (*SetLoadBalancerTargetsInput, error) - Delete(container *SetLoadBalancerTargetsInput) error -} - -func 
newSetLoadBalancerTargetsInputClient(rancherClient *RancherClient) *SetLoadBalancerTargetsInputClient { - return &SetLoadBalancerTargetsInputClient{ - rancherClient: rancherClient, - } -} - -func (c *SetLoadBalancerTargetsInputClient) Create(container *SetLoadBalancerTargetsInput) (*SetLoadBalancerTargetsInput, error) { - resp := &SetLoadBalancerTargetsInput{} - err := c.rancherClient.doCreate(SET_LOAD_BALANCER_TARGETS_INPUT_TYPE, container, resp) - return resp, err -} - -func (c *SetLoadBalancerTargetsInputClient) Update(existing *SetLoadBalancerTargetsInput, updates interface{}) (*SetLoadBalancerTargetsInput, error) { - resp := &SetLoadBalancerTargetsInput{} - err := c.rancherClient.doUpdate(SET_LOAD_BALANCER_TARGETS_INPUT_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *SetLoadBalancerTargetsInputClient) List(opts *ListOpts) (*SetLoadBalancerTargetsInputCollection, error) { - resp := &SetLoadBalancerTargetsInputCollection{} - err := c.rancherClient.doList(SET_LOAD_BALANCER_TARGETS_INPUT_TYPE, opts, resp) - return resp, err -} - -func (c *SetLoadBalancerTargetsInputClient) ById(id string) (*SetLoadBalancerTargetsInput, error) { - resp := &SetLoadBalancerTargetsInput{} - err := c.rancherClient.doById(SET_LOAD_BALANCER_TARGETS_INPUT_TYPE, id, resp) - return resp, err -} - -func (c *SetLoadBalancerTargetsInputClient) Delete(container *SetLoadBalancerTargetsInput) error { - return c.rancherClient.doResourceDelete(SET_LOAD_BALANCER_TARGETS_INPUT_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_setting.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_setting.go deleted file mode 100644 index 2833c71a..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_setting.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - SETTING_TYPE = "setting" -) - -type Setting struct { - Resource - - Name string `json:"name,omitempty"` - - Value string `json:"value,omitempty"` - -} - -type SettingCollection struct { - Collection - Data []Setting `json:"data,omitempty"` -} - -type SettingClient struct { - rancherClient *RancherClient -} - -type SettingOperations interface { - List(opts *ListOpts) (*SettingCollection, error) - Create(opts *Setting) (*Setting, error) - Update(existing *Setting, updates interface{}) (*Setting, error) - ById(id string) (*Setting, error) - Delete(container *Setting) error -} - -func newSettingClient(rancherClient *RancherClient) *SettingClient { - return &SettingClient{ - rancherClient: rancherClient, - } -} - -func (c *SettingClient) Create(container *Setting) (*Setting, error) { - resp := &Setting{} - err := c.rancherClient.doCreate(SETTING_TYPE, container, resp) - return resp, err -} - -func (c *SettingClient) Update(existing *Setting, updates interface{}) (*Setting, error) { - resp := &Setting{} - err := c.rancherClient.doUpdate(SETTING_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *SettingClient) List(opts *ListOpts) (*SettingCollection, error) { - resp := &SettingCollection{} - err := c.rancherClient.doList(SETTING_TYPE, opts, resp) - return resp, err -} - -func (c *SettingClient) ById(id string) (*Setting, error) { - resp := &Setting{} - err := c.rancherClient.doById(SETTING_TYPE, id, resp) - return resp, err -} - -func (c *SettingClient) Delete(container *Setting) error { - return c.rancherClient.doResourceDelete(SETTING_TYPE, &container.Resource) -} diff --git 
a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_stats_access.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_stats_access.go deleted file mode 100644 index 241524b9..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_stats_access.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - STATS_ACCESS_TYPE = "statsAccess" -) - -type StatsAccess struct { - Resource - - Token string `json:"token,omitempty"` - - Url string `json:"url,omitempty"` - -} - -type StatsAccessCollection struct { - Collection - Data []StatsAccess `json:"data,omitempty"` -} - -type StatsAccessClient struct { - rancherClient *RancherClient -} - -type StatsAccessOperations interface { - List(opts *ListOpts) (*StatsAccessCollection, error) - Create(opts *StatsAccess) (*StatsAccess, error) - Update(existing *StatsAccess, updates interface{}) (*StatsAccess, error) - ById(id string) (*StatsAccess, error) - Delete(container *StatsAccess) error -} - -func newStatsAccessClient(rancherClient *RancherClient) *StatsAccessClient { - return &StatsAccessClient{ - rancherClient: rancherClient, - } -} - -func (c *StatsAccessClient) Create(container *StatsAccess) (*StatsAccess, error) { - resp := &StatsAccess{} - err := c.rancherClient.doCreate(STATS_ACCESS_TYPE, container, resp) - return resp, err -} - -func (c *StatsAccessClient) Update(existing *StatsAccess, updates interface{}) (*StatsAccess, error) { - resp := &StatsAccess{} - err := c.rancherClient.doUpdate(STATS_ACCESS_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *StatsAccessClient) List(opts *ListOpts) (*StatsAccessCollection, error) { - resp := &StatsAccessCollection{} - err := c.rancherClient.doList(STATS_ACCESS_TYPE, opts, resp) - return resp, err -} - -func (c *StatsAccessClient) ById(id string) (*StatsAccess, error) { - resp := &StatsAccess{} - err := c.rancherClient.doById(STATS_ACCESS_TYPE, id, resp) - return resp, err -} - -func (c *StatsAccessClient) Delete(container *StatsAccess) error { - return c.rancherClient.doResourceDelete(STATS_ACCESS_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_storage_pool.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_storage_pool.go deleted file mode 100644 index 7108cd97..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_storage_pool.go +++ /dev/null @@ -1,136 +0,0 @@ -package client - -const ( - STORAGE_POOL_TYPE = "storagePool" -) - -type StoragePool struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type StoragePoolCollection struct { - Collection - Data []StoragePool `json:"data,omitempty"` -} - -type StoragePoolClient struct { - rancherClient *RancherClient -} - -type StoragePoolOperations interface { - List(opts *ListOpts) (*StoragePoolCollection, 
error) - Create(opts *StoragePool) (*StoragePool, error) - Update(existing *StoragePool, updates interface{}) (*StoragePool, error) - ById(id string) (*StoragePool, error) - Delete(container *StoragePool) error - ActionActivate (*StoragePool) (*StoragePool, error) - ActionCreate (*StoragePool) (*StoragePool, error) - ActionDeactivate (*StoragePool) (*StoragePool, error) - ActionPurge (*StoragePool) (*StoragePool, error) - ActionRemove (*StoragePool) (*StoragePool, error) - ActionRestore (*StoragePool) (*StoragePool, error) - ActionUpdate (*StoragePool) (*StoragePool, error) -} - -func newStoragePoolClient(rancherClient *RancherClient) *StoragePoolClient { - return &StoragePoolClient{ - rancherClient: rancherClient, - } -} - -func (c *StoragePoolClient) Create(container *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doCreate(STORAGE_POOL_TYPE, container, resp) - return resp, err -} - -func (c *StoragePoolClient) Update(existing *StoragePool, updates interface{}) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doUpdate(STORAGE_POOL_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *StoragePoolClient) List(opts *ListOpts) (*StoragePoolCollection, error) { - resp := &StoragePoolCollection{} - err := c.rancherClient.doList(STORAGE_POOL_TYPE, opts, resp) - return resp, err -} - -func (c *StoragePoolClient) ById(id string) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doById(STORAGE_POOL_TYPE, id, resp) - return resp, err -} - -func (c *StoragePoolClient) Delete(container *StoragePool) error { - return c.rancherClient.doResourceDelete(STORAGE_POOL_TYPE, &container.Resource) -} - -func (c *StoragePoolClient) ActionActivate(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *StoragePoolClient) ActionCreate(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *StoragePoolClient) ActionDeactivate(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *StoragePoolClient) ActionPurge(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *StoragePoolClient) ActionRemove(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *StoragePoolClient) ActionRestore(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *StoragePoolClient) ActionUpdate(resource *StoragePool) (*StoragePool, error) { - resp := &StoragePool{} - err := c.rancherClient.doEmptyAction(STORAGE_POOL_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_subscribe.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_subscribe.go deleted file mode 
100644 index 7031f530..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_subscribe.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -const ( - SUBSCRIBE_TYPE = "subscribe" -) - -type Subscribe struct { - Resource - - AgentId string `json:"agentId,omitempty"` - - EventNames []string `json:"eventNames,omitempty"` - -} - -type SubscribeCollection struct { - Collection - Data []Subscribe `json:"data,omitempty"` -} - -type SubscribeClient struct { - rancherClient *RancherClient -} - -type SubscribeOperations interface { - List(opts *ListOpts) (*SubscribeCollection, error) - Create(opts *Subscribe) (*Subscribe, error) - Update(existing *Subscribe, updates interface{}) (*Subscribe, error) - ById(id string) (*Subscribe, error) - Delete(container *Subscribe) error -} - -func newSubscribeClient(rancherClient *RancherClient) *SubscribeClient { - return &SubscribeClient{ - rancherClient: rancherClient, - } -} - -func (c *SubscribeClient) Create(container *Subscribe) (*Subscribe, error) { - resp := &Subscribe{} - err := c.rancherClient.doCreate(SUBSCRIBE_TYPE, container, resp) - return resp, err -} - -func (c *SubscribeClient) Update(existing *Subscribe, updates interface{}) (*Subscribe, error) { - resp := &Subscribe{} - err := c.rancherClient.doUpdate(SUBSCRIBE_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *SubscribeClient) List(opts *ListOpts) (*SubscribeCollection, error) { - resp := &SubscribeCollection{} - err := c.rancherClient.doList(SUBSCRIBE_TYPE, opts, resp) - return resp, err -} - -func (c *SubscribeClient) ById(id string) (*Subscribe, error) { - resp := &Subscribe{} - err := c.rancherClient.doById(SUBSCRIBE_TYPE, id, resp) - return resp, err -} - -func (c *SubscribeClient) Delete(container *Subscribe) error { - return c.rancherClient.doResourceDelete(SUBSCRIBE_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task.go deleted file mode 100644 index 3fb3dccf..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task.go +++ /dev/null @@ -1,70 +0,0 @@ -package client - -const ( - TASK_TYPE = "task" -) - -type Task struct { - Resource - - Name string `json:"name,omitempty"` - -} - -type TaskCollection struct { - Collection - Data []Task `json:"data,omitempty"` -} - -type TaskClient struct { - rancherClient *RancherClient -} - -type TaskOperations interface { - List(opts *ListOpts) (*TaskCollection, error) - Create(opts *Task) (*Task, error) - Update(existing *Task, updates interface{}) (*Task, error) - ById(id string) (*Task, error) - Delete(container *Task) error - ActionExecute (*Task) (*Task, error) -} - -func newTaskClient(rancherClient *RancherClient) *TaskClient { - return &TaskClient{ - rancherClient: rancherClient, - } -} - -func (c *TaskClient) Create(container *Task) (*Task, error) { - resp := &Task{} - err := c.rancherClient.doCreate(TASK_TYPE, container, resp) - return resp, err -} - -func (c *TaskClient) Update(existing *Task, updates interface{}) (*Task, error) { - resp := &Task{} - err := c.rancherClient.doUpdate(TASK_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *TaskClient) List(opts *ListOpts) (*TaskCollection, error) { - resp := &TaskCollection{} - err := c.rancherClient.doList(TASK_TYPE, opts, resp) - return resp, err -} - -func (c *TaskClient) ById(id string) (*Task, error) { - resp := &Task{} - err := 
c.rancherClient.doById(TASK_TYPE, id, resp) - return resp, err -} - -func (c *TaskClient) Delete(container *Task) error { - return c.rancherClient.doResourceDelete(TASK_TYPE, &container.Resource) -} - -func (c *TaskClient) ActionExecute(resource *Task) (*Task, error) { - resp := &Task{} - err := c.rancherClient.doEmptyAction(TASK_TYPE, "execute", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task_instance.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task_instance.go deleted file mode 100644 index 55f753d8..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_task_instance.go +++ /dev/null @@ -1,73 +0,0 @@ -package client - -const ( - TASK_INSTANCE_TYPE = "taskInstance" -) - -type TaskInstance struct { - Resource - - EndTime string `json:"endTime,omitempty"` - - Exception string `json:"exception,omitempty"` - - Name string `json:"name,omitempty"` - - ServerId string `json:"serverId,omitempty"` - - StartTime string `json:"startTime,omitempty"` - - TaskId string `json:"taskId,omitempty"` - -} - -type TaskInstanceCollection struct { - Collection - Data []TaskInstance `json:"data,omitempty"` -} - -type TaskInstanceClient struct { - rancherClient *RancherClient -} - -type TaskInstanceOperations interface { - List(opts *ListOpts) (*TaskInstanceCollection, error) - Create(opts *TaskInstance) (*TaskInstance, error) - Update(existing *TaskInstance, updates interface{}) (*TaskInstance, error) - ById(id string) (*TaskInstance, error) - Delete(container *TaskInstance) error -} - -func newTaskInstanceClient(rancherClient *RancherClient) *TaskInstanceClient { - return &TaskInstanceClient{ - rancherClient: rancherClient, - } -} - -func (c *TaskInstanceClient) Create(container *TaskInstance) (*TaskInstance, error) { - resp := &TaskInstance{} - err := c.rancherClient.doCreate(TASK_INSTANCE_TYPE, container, resp) - return resp, err -} - -func (c *TaskInstanceClient) Update(existing *TaskInstance, updates interface{}) (*TaskInstance, error) { - resp := &TaskInstance{} - err := c.rancherClient.doUpdate(TASK_INSTANCE_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *TaskInstanceClient) List(opts *ListOpts) (*TaskInstanceCollection, error) { - resp := &TaskInstanceCollection{} - err := c.rancherClient.doList(TASK_INSTANCE_TYPE, opts, resp) - return resp, err -} - -func (c *TaskInstanceClient) ById(id string) (*TaskInstance, error) { - resp := &TaskInstance{} - err := c.rancherClient.doById(TASK_INSTANCE_TYPE, id, resp) - return resp, err -} - -func (c *TaskInstanceClient) Delete(container *TaskInstance) error { - return c.rancherClient.doResourceDelete(TASK_INSTANCE_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_type_documentation.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_type_documentation.go deleted file mode 100644 index fa0c48a7..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_type_documentation.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -const ( - TYPE_DOCUMENTATION_TYPE = "typeDocumentation" -) - -type TypeDocumentation struct { - Resource - - Description string `json:"description,omitempty"` - -} - -type TypeDocumentationCollection struct { - Collection - Data []TypeDocumentation `json:"data,omitempty"` -} - -type TypeDocumentationClient struct { - rancherClient *RancherClient -} - -type 
TypeDocumentationOperations interface { - List(opts *ListOpts) (*TypeDocumentationCollection, error) - Create(opts *TypeDocumentation) (*TypeDocumentation, error) - Update(existing *TypeDocumentation, updates interface{}) (*TypeDocumentation, error) - ById(id string) (*TypeDocumentation, error) - Delete(container *TypeDocumentation) error -} - -func newTypeDocumentationClient(rancherClient *RancherClient) *TypeDocumentationClient { - return &TypeDocumentationClient{ - rancherClient: rancherClient, - } -} - -func (c *TypeDocumentationClient) Create(container *TypeDocumentation) (*TypeDocumentation, error) { - resp := &TypeDocumentation{} - err := c.rancherClient.doCreate(TYPE_DOCUMENTATION_TYPE, container, resp) - return resp, err -} - -func (c *TypeDocumentationClient) Update(existing *TypeDocumentation, updates interface{}) (*TypeDocumentation, error) { - resp := &TypeDocumentation{} - err := c.rancherClient.doUpdate(TYPE_DOCUMENTATION_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *TypeDocumentationClient) List(opts *ListOpts) (*TypeDocumentationCollection, error) { - resp := &TypeDocumentationCollection{} - err := c.rancherClient.doList(TYPE_DOCUMENTATION_TYPE, opts, resp) - return resp, err -} - -func (c *TypeDocumentationClient) ById(id string) (*TypeDocumentation, error) { - resp := &TypeDocumentation{} - err := c.rancherClient.doById(TYPE_DOCUMENTATION_TYPE, id, resp) - return resp, err -} - -func (c *TypeDocumentationClient) Delete(container *TypeDocumentation) error { - return c.rancherClient.doResourceDelete(TYPE_DOCUMENTATION_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_virtualbox_config.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_virtualbox_config.go deleted file mode 100644 index 3c9ee813..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_virtualbox_config.go +++ /dev/null @@ -1,67 +0,0 @@ -package client - -const ( - VIRTUALBOX_CONFIG_TYPE = "virtualboxConfig" -) - -type VirtualboxConfig struct { - Resource - - Boot2dockerUrl string `json:"boot2dockerUrl,omitempty"` - - DiskSize string `json:"diskSize,omitempty"` - - Memory string `json:"memory,omitempty"` - -} - -type VirtualboxConfigCollection struct { - Collection - Data []VirtualboxConfig `json:"data,omitempty"` -} - -type VirtualboxConfigClient struct { - rancherClient *RancherClient -} - -type VirtualboxConfigOperations interface { - List(opts *ListOpts) (*VirtualboxConfigCollection, error) - Create(opts *VirtualboxConfig) (*VirtualboxConfig, error) - Update(existing *VirtualboxConfig, updates interface{}) (*VirtualboxConfig, error) - ById(id string) (*VirtualboxConfig, error) - Delete(container *VirtualboxConfig) error -} - -func newVirtualboxConfigClient(rancherClient *RancherClient) *VirtualboxConfigClient { - return &VirtualboxConfigClient{ - rancherClient: rancherClient, - } -} - -func (c *VirtualboxConfigClient) Create(container *VirtualboxConfig) (*VirtualboxConfig, error) { - resp := &VirtualboxConfig{} - err := c.rancherClient.doCreate(VIRTUALBOX_CONFIG_TYPE, container, resp) - return resp, err -} - -func (c *VirtualboxConfigClient) Update(existing *VirtualboxConfig, updates interface{}) (*VirtualboxConfig, error) { - resp := &VirtualboxConfig{} - err := c.rancherClient.doUpdate(VIRTUALBOX_CONFIG_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *VirtualboxConfigClient) List(opts *ListOpts) (*VirtualboxConfigCollection, error) 
{ - resp := &VirtualboxConfigCollection{} - err := c.rancherClient.doList(VIRTUALBOX_CONFIG_TYPE, opts, resp) - return resp, err -} - -func (c *VirtualboxConfigClient) ById(id string) (*VirtualboxConfig, error) { - resp := &VirtualboxConfig{} - err := c.rancherClient.doById(VIRTUALBOX_CONFIG_TYPE, id, resp) - return resp, err -} - -func (c *VirtualboxConfigClient) Delete(container *VirtualboxConfig) error { - return c.rancherClient.doResourceDelete(VIRTUALBOX_CONFIG_TYPE, &container.Resource) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_volume.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_volume.go deleted file mode 100644 index 451ed894..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_volume.go +++ /dev/null @@ -1,158 +0,0 @@ -package client - -const ( - VOLUME_TYPE = "volume" -) - -type Volume struct { - Resource - - AccountId string `json:"accountId,omitempty"` - - Created string `json:"created,omitempty"` - - Data map[string]interface{} `json:"data,omitempty"` - - Description string `json:"description,omitempty"` - - ImageId string `json:"imageId,omitempty"` - - InstanceId string `json:"instanceId,omitempty"` - - IsHostPath bool `json:"isHostPath,omitempty"` - - Kind string `json:"kind,omitempty"` - - Name string `json:"name,omitempty"` - - RemoveTime string `json:"removeTime,omitempty"` - - Removed string `json:"removed,omitempty"` - - State string `json:"state,omitempty"` - - Transitioning string `json:"transitioning,omitempty"` - - TransitioningMessage string `json:"transitioningMessage,omitempty"` - - TransitioningProgress int `json:"transitioningProgress,omitempty"` - - Uri string `json:"uri,omitempty"` - - Uuid string `json:"uuid,omitempty"` - -} - -type VolumeCollection struct { - Collection - Data []Volume `json:"data,omitempty"` -} - -type VolumeClient struct { - rancherClient *RancherClient -} - -type VolumeOperations interface { - List(opts *ListOpts) (*VolumeCollection, error) - Create(opts *Volume) (*Volume, error) - Update(existing *Volume, updates interface{}) (*Volume, error) - ById(id string) (*Volume, error) - Delete(container *Volume) error - ActionActivate (*Volume) (*Volume, error) - ActionAllocate (*Volume) (*Volume, error) - ActionCreate (*Volume) (*Volume, error) - ActionDeactivate (*Volume) (*Volume, error) - ActionDeallocate (*Volume) (*Volume, error) - ActionPurge (*Volume) (*Volume, error) - ActionRemove (*Volume) (*Volume, error) - ActionRestore (*Volume) (*Volume, error) - ActionUpdate (*Volume) (*Volume, error) -} - -func newVolumeClient(rancherClient *RancherClient) *VolumeClient { - return &VolumeClient{ - rancherClient: rancherClient, - } -} - -func (c *VolumeClient) Create(container *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doCreate(VOLUME_TYPE, container, resp) - return resp, err -} - -func (c *VolumeClient) Update(existing *Volume, updates interface{}) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doUpdate(VOLUME_TYPE, &existing.Resource, updates, resp) - return resp, err -} - -func (c *VolumeClient) List(opts *ListOpts) (*VolumeCollection, error) { - resp := &VolumeCollection{} - err := c.rancherClient.doList(VOLUME_TYPE, opts, resp) - return resp, err -} - -func (c *VolumeClient) ById(id string) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doById(VOLUME_TYPE, id, resp) - return resp, err -} - -func (c *VolumeClient) Delete(container *Volume) error { - return 
c.rancherClient.doResourceDelete(VOLUME_TYPE, &container.Resource) -} - -func (c *VolumeClient) ActionActivate(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "activate", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionAllocate(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "allocate", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionCreate(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "create", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionDeactivate(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "deactivate", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionDeallocate(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "deallocate", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionPurge(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "purge", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionRemove(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "remove", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionRestore(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "restore", &resource.Resource, resp) - return resp, err -} - -func (c *VolumeClient) ActionUpdate(resource *Volume) (*Volume, error) { - resp := &Volume{} - err := c.rancherClient.doEmptyAction(VOLUME_TYPE, "update", &resource.Resource, resp) - return resp, err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/types.go b/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/types.go deleted file mode 100644 index 7d7c1a25..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/go-rancher/client/types.go +++ /dev/null @@ -1,89 +0,0 @@ -package client - -type Collection struct { - Type string `json:"type,omitempty"` - ResourceType string `json:"resourceType,omitempty"` - Links map[string]string `json:"links,omitempty"` - CreateTypes map[string]string `json:"createTypes,omitempty"` - Actions map[string]string `json:"actions,omitempty"` - SortLinks map[string]string `json:"sortLinks,omitempty"` - Pagination *Pagination `json:"pagination,omitempty"` - Sort *Sort `json:"sort,omitempty"` - Filters map[string][]Condition `json:"filters,omitempty"` -} - -type Sort struct { - Name string `json:"name,omitempty"` - Order string `json:"order,omitempty"` - Reverse string `json:"reverse,omitempty"` -} - -type Condition struct { - Modifier string `json:"modifier,omitempty"` - Value interface{} `json:"value,omitempty"` -} - -type Pagination struct { - Marker string `json:"marker,omitempty"` - First string `json:"first,omitempty"` - Previous string `json:"previous,omitempty"` - Next string `json:"next,omitempty"` - Limit *int64 `json:"limit,omitempty"` - Total *int64 `json:"total,omitempty"` - Partial bool `json:"partial,omitempty"` -} - -type Resource struct { - Id string `json:"id,omitempty"` - Type string `json:"type,omitempty"` - Links map[string]string `json:"links,omitempty"` - Actions map[string]string 
`json:"actions,omitempty"` -} - -type Schemas struct { - Collection - Data []Schema `json:"data,omitempty"` -} - -type Schema struct { - Resource - PluralName string `json:"pluralName,omitempty"` - ResourceMethods []string `json:"resourceMethods,omitempty"` - ResourceFields map[string]Field `json:"resourceFields,omitempty"` - ResourceActions map[string]Action `json:"resourceActions,omitempty"` - CollectionMethods []string `json:"collectionMethods,omitempty"` - CollectionFields map[string]Field `json:"collectionFields,omitempty"` - CollectionActions map[string]Action `json:"collectionActions,omitempty"` - CollectionFilters map[string]Filter `json:"collectionFilters,omitempty"` - IncludeableLinks []string `json:"includeableLinks,omitempty"` -} - -type Field struct { - Type string `json:"type,omitempty"` - Default interface{} `json:"default,omitempty"` - Unique bool `json:"unique,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Create bool `json:"create,omitempty"` - Required bool `json:"required,omitempty"` - Update bool `json:"update,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` - Options []string `json:"options,omitempty"` - ValidChars string `json:"validChars,omitempty"` - InvalidChars string `json:"invalidChars,omitempty"` -} - -type Action struct { - Input string `json:"input,omitempty"` - Output string `json:"output,omitempty"` -} - -type Filter struct { - Modifiers []string `json:"modifiers,omitempty"` -} - -type ListOpts struct { - Filters map[string]interface{} -} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project.go b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project.go deleted file mode 100644 index 0992af22..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project.go +++ /dev/null @@ -1,279 +0,0 @@ -package project - -import ( - "errors" - "fmt" - "strings" - - log "github.com/Sirupsen/logrus" -) - -type ServiceState string - -var ( - EXECUTED ServiceState = ServiceState("executed") - UNKNOWN ServiceState = ServiceState("unknown") - ErrRestart error = errors.New("Restart execution") -) - -type ProjectEvent struct { - Event Event - ServiceName string - Data map[string]string -} - -type wrapperAction func(*serviceWrapper, map[string]*serviceWrapper) - -func NewProject(name string, factory ServiceFactory) *Project { - p := &Project{ - Name: name, - Configs: make(map[string]*ServiceConfig), - factory: factory, - } - - listener := defaultListener{ - listenChan: make(chan ProjectEvent), - project: p, - } - - p.listeners = []chan<- ProjectEvent{listener.listenChan} - - go listener.start() - - return p -} - -func (p *Project) CreateService(name string) (Service, error) { - existing, ok := p.Configs[name] - if !ok { - return nil, fmt.Errorf("Failed to find service: %s", name) - } - - // Copy because we are about to modify the environment - config := *existing - - if p.EnvironmentLookup != nil { - parsedEnv := make([]string, 0, len(config.Environment.Slice())) - - for _, env := range config.Environment.Slice() { - if strings.IndexRune(env, '=') != -1 { - parsedEnv = append(parsedEnv, env) - continue - } - - for _, value := range p.EnvironmentLookup.Lookup(env, name, &config) { - parsedEnv = append(parsedEnv, value) - } - } - - config.Environment = NewMaporEqualSlice(parsedEnv) - } - - return p.factory.Create(p, name, 
&config) -} - -func (p *Project) AddConfig(name string, config *ServiceConfig) error { - p.Notify(SERVICE_ADD, name, nil) - - p.Configs[name] = config - p.reload = append(p.reload, name) - - return nil -} - -func (p *Project) Load(bytes []byte) error { - configs := make(map[string]*ServiceConfig) - configs, err := Merge(p, bytes) - if err != nil { - log.Fatalf("Could not parse config for project %s : %v", p.Name, err) - } - - for name, config := range configs { - err := p.AddConfig(name, config) - if err != nil { - return err - } - } - - return nil -} - -func (p *Project) loadWrappers(wrappers map[string]*serviceWrapper) error { - for _, name := range p.reload { - wrapper, err := newServiceWrapper(name, p) - if err != nil { - return err - } - wrappers[name] = wrapper - } - - p.reload = []string{} - - return nil -} - -func (p *Project) Create(services ...string) error { - p.Notify(PROJECT_CREATE_START, "", nil) - - err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Create(wrappers) - })) - - if err == nil { - p.Notify(PROJECT_CREATE_DONE, "", nil) - } - - return err -} - -func (p *Project) Down(services ...string) error { - p.Notify(PROJECT_DOWN_START, "", nil) - - err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Stop(wrappers) - })) - - if err == nil { - p.Notify(PROJECT_DOWN_DONE, "", nil) - } - - return err -} - -func (p *Project) Restart(services ...string) error { - p.Notify(PROJECT_RESTART_START, "", nil) - - err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Restart(wrappers) - })) - - if err == nil { - p.Notify(PROJECT_RESTART_DONE, "", nil) - } - - return err -} - -func (p *Project) Up(services ...string) error { - p.Notify(PROJECT_UP_START, "", nil) - - err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Start(wrappers) - })) - - if err == nil { - p.Notify(PROJECT_UP_DONE, "", nil) - } - - return err -} - -func (p *Project) Log(services ...string) error { - return p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Log(wrappers) - })) -} - -func (p *Project) Delete(services ...string) error { - p.Notify(PROJECT_DELETE_START, "", nil) - - err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) { - wrapper.Delete(wrappers) - })) - - if err == nil { - p.Notify(PROJECT_DELETE_DONE, "", nil) - } - - return err -} - -func isSelected(wrapper *serviceWrapper, selected map[string]bool) bool { - return len(selected) == 0 || selected[wrapper.name] -} - -func (p *Project) forEach(services []string, action wrapperAction) error { - selected := make(map[string]bool) - wrappers := make(map[string]*serviceWrapper) - - for _, s := range services { - selected[s] = true - } - - return p.traverse(selected, wrappers, action) -} - -func (p *Project) traverse(selected map[string]bool, wrappers map[string]*serviceWrapper, action wrapperAction) error { - restart := false - - for _, wrapper := range wrappers { - if err := wrapper.Reset(); err != nil { - return err - } - } - - p.loadWrappers(wrappers) - - for _, wrapper := range wrappers { - if isSelected(wrapper, selected) { - go action(wrapper, wrappers) - } else { - wrapper.Ignore() - } - } - - var firstError error - - for _, wrapper := range wrappers { - if 
!isSelected(wrapper, selected) { - continue - } - if err := wrapper.Wait(); err == ErrRestart { - restart = true - } else if err != nil { - log.Errorf("Failed to start: %s : %v", wrapper.name, err) - if firstError == nil { - firstError = err - } - } - } - - if restart { - if p.ReloadCallback != nil { - if err := p.ReloadCallback(); err != nil { - log.Errorf("Failed calling callback: %v", err) - } - } - return p.traverse(selected, wrappers, action) - } else { - return firstError - } -} - -func (p *Project) AddListener(c chan<- ProjectEvent) { - if !p.hasListeners { - for _, l := range p.listeners { - close(l) - } - p.hasListeners = true - p.listeners = []chan<- ProjectEvent{c} - } else { - p.listeners = append(p.listeners, c) - } -} - -func (p *Project) Notify(event Event, serviceName string, data map[string]string) { - projectEvent := ProjectEvent{ - Event: event, - ServiceName: serviceName, - Data: data, - } - - for _, l := range p.listeners { - // Don't ever block - select { - case l <- projectEvent: - default: - } - } -} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project_test.go b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project_test.go deleted file mode 100644 index 1dbac662..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/project_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package project - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type testFactory struct { -} - -func (*testFactory) Create(project *Project, name string, serviceConfig *ServiceConfig) (Service, error) { - return &testService{}, nil -} - -type testService struct { - EmptyService -} - -func (*testService) Name() string { return "" } -func (*testService) Up() error { return nil } -func (*testService) Config() *ServiceConfig { return &ServiceConfig{} } - -func TestNewProject(t *testing.T) { - p := NewProject("foo", &testFactory{}) - assert.Equal(t, "foo", p.Name) -} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/service-wrapper.go b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/service-wrapper.go deleted file mode 100644 index cc71ca45..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/service-wrapper.go +++ /dev/null @@ -1,199 +0,0 @@ -package project - -import ( - "strings" - "sync" - - log "github.com/Sirupsen/logrus" -) - -type serviceWrapper struct { - name string - service Service - done sync.WaitGroup - state ServiceState - err error - project *Project -} - -func newServiceWrapper(name string, p *Project) (*serviceWrapper, error) { - wrapper := &serviceWrapper{ - name: name, - state: UNKNOWN, - project: p, - } - - return wrapper, wrapper.Reset() -} - -func (s *serviceWrapper) Reset() error { - if s.state != EXECUTED { - service, err := s.project.CreateService(s.name) - if err != nil { - log.Errorf("Failed to create service for %s : %v", s.name, err) - return err - } - - s.service = service - } - - if s.err == ErrRestart { - s.err = nil - } - s.done.Add(1) - - return nil -} - -func (s *serviceWrapper) Ignore() { - s.state = EXECUTED - s.project.Notify(SERVICE_UP_IGNORED, s.service.Name(), nil) - s.done.Done() -} - -func (s *serviceWrapper) Stop(wrappers map[string]*serviceWrapper) { - defer s.done.Done() - - if s.state == EXECUTED { - return - } - - s.state = EXECUTED - - s.project.Notify(SERVICE_DOWN_START, s.service.Name(), nil) - - s.err = 
s.service.Down() - if s.err != nil { - log.Errorf("Failed to stop %s : %v", s.name, s.err) - } else { - s.project.Notify(SERVICE_DOWN, s.service.Name(), nil) - } -} - -func (s *serviceWrapper) Log(wrappers map[string]*serviceWrapper) { - defer s.done.Done() - - if s.state == EXECUTED { - return - } - - s.state = EXECUTED - - s.err = s.service.Log() - if s.err != nil { - log.Errorf("Failed to log %s : %v", s.name, s.err) - } -} - -func (s *serviceWrapper) Delete(wrappers map[string]*serviceWrapper) { - defer s.done.Done() - - if s.state == EXECUTED { - return - } - - s.state = EXECUTED - - s.project.Notify(SERVICE_DELETE_START, s.service.Name(), nil) - - s.err = s.service.Delete() - if s.err != nil { - log.Errorf("Failed to delete %s : %v", s.name, s.err) - } else { - s.project.Notify(SERVICE_DELETE, s.service.Name(), nil) - } -} - -func (s *serviceWrapper) waitForDeps(wrappers map[string]*serviceWrapper) bool { - for _, link := range append(s.service.Config().Links.Slice(), s.service.Config().VolumesFrom...) { - name := strings.Split(link, ":")[0] - if wrapper, ok := wrappers[name]; ok { - if wrapper.Wait() == ErrRestart { - s.project.Notify(PROJECT_RELOAD, wrapper.service.Name(), nil) - s.err = ErrRestart - return false - } - } else { - log.Errorf("Failed to find %s", name) - } - } - - return true -} - -func (s *serviceWrapper) Restart(wrappers map[string]*serviceWrapper) { - defer s.done.Done() - - if s.state == EXECUTED { - return - } - - if !s.waitForDeps(wrappers) { - return - } - - s.state = EXECUTED - - s.project.Notify(SERVICE_RESTART_START, s.service.Name(), nil) - - s.err = s.service.Restart() - if s.err != nil { - log.Errorf("Failed to start %s : %v", s.name, s.err) - } else { - s.project.Notify(SERVICE_RESTART, s.service.Name(), nil) - } -} - -func (s *serviceWrapper) Start(wrappers map[string]*serviceWrapper) { - defer s.done.Done() - - if s.state == EXECUTED { - return - } - - if !s.waitForDeps(wrappers) { - return - } - - s.state = EXECUTED - - s.project.Notify(SERVICE_UP_START, s.service.Name(), nil) - - s.err = s.service.Up() - if s.err == ErrRestart { - s.project.Notify(SERVICE_UP, s.service.Name(), nil) - s.project.Notify(PROJECT_RELOAD_TRIGGER, s.service.Name(), nil) - } else if s.err != nil { - log.Errorf("Failed to start %s : %v", s.name, s.err) - } else { - s.project.Notify(SERVICE_UP, s.service.Name(), nil) - } -} - -func (s *serviceWrapper) Create(wrappers map[string]*serviceWrapper) { - defer s.done.Done() - - if s.state == EXECUTED { - return - } - - if !s.waitForDeps(wrappers) { - return - } - - s.state = EXECUTED - - s.project.Notify(SERVICE_CREATE_START, s.service.Name(), nil) - - s.err = s.service.Create() - if s.err != nil { - log.Errorf("Failed to start %s : %v", s.name, s.err) - } else { - s.project.Notify(SERVICE_CREATE, s.service.Name(), nil) - } -} - -func (s *serviceWrapper) Wait() error { - s.done.Wait() - return s.err -} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/test_files/docker-compose.yml b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/test_files/docker-compose.yml deleted file mode 100644 index 20d1de45..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/project/test_files/docker-compose.yml +++ /dev/null @@ -1,25 +0,0 @@ -web: - image: rancher/server:beta - ports: - - "8080:8080" - - "3306:3306" -zk: - image: ibuildthecloud/zookeeper - ports: - - "2181:2181" - environment: - CATTLE: "Prod" -redis: - image: ibuildthecloud/redis - 
expose: - - "2176" - ports: - - "2222" -app: - image: rancher/server:beta -ubuntu: - image: ubuntu:14.04.1 - command: sleep 60 -master: - image: rancher/build-master - privileged: true diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/util/util.go b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/util/util.go deleted file mode 100644 index 4633947c..00000000 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/librcompose/util/util.go +++ /dev/null @@ -1,21 +0,0 @@ -package util - -import "gopkg.in/yaml.v2" - -func Convert(src, target interface{}) error { - newBytes, err := yaml.Marshal(src) - if err != nil { - return err - } - - return yaml.Unmarshal(newBytes, target) -} - -func ConvertToInterfaceMap(input map[string]string) map[string]interface{} { - result := map[string]interface{}{} - for k, v := range input { - result[k] = v - } - - return result -} diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/.gitignore b/Godeps/_workspace/src/github.com/samalba/dockerclient/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/LICENSE b/Godeps/_workspace/src/github.com/samalba/dockerclient/LICENSE new file mode 100644 index 00000000..00e1edb9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Sam Alba + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/README.md b/Godeps/_workspace/src/github.com/samalba/dockerclient/README.md new file mode 100644 index 00000000..5a5027b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/README.md @@ -0,0 +1,98 @@ +Docker client library in Go +=========================== +[![GoDoc](http://godoc.org/github.com/samalba/dockerclient?status.png)](http://godoc.org/github.com/samalba/dockerclient) + +Well maintained docker client library. + +# How to use it? 
+
+Here is an example showing how to use it:
+
+```go
+package main
+
+import (
+	"github.com/samalba/dockerclient"
+	"log"
+	"time"
+	"os"
+)
+
+// Callback used to listen to Docker's events
+func eventCallback(event *dockerclient.Event, ec chan error, args ...interface{}) {
+	log.Printf("Received event: %#v\n", *event)
+}
+
+func main() {
+	// Init the client
+	docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
+
+	// Get only running containers
+	containers, err := docker.ListContainers(false, false, "")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, c := range containers {
+		log.Println(c.Id, c.Names)
+	}
+
+	// Inspect the first container returned
+	if len(containers) > 0 {
+		id := containers[0].Id
+		info, _ := docker.InspectContainer(id)
+		log.Println(info)
+	}
+
+	// Build a docker image
+	// some.tar contains the build context (Dockerfile and any files it needs to add/copy)
+	dockerBuildContext, err := os.Open("some.tar")
+	defer dockerBuildContext.Close()
+	buildImageConfig := &dockerclient.BuildImage{
+		Context:        dockerBuildContext,
+		RepoName:       "your_image_name",
+		SuppressOutput: false,
+	}
+	reader, err := docker.BuildImage(buildImageConfig)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer reader.Close()
+	// Create a container
+	containerConfig := &dockerclient.ContainerConfig{
+		Image:       "ubuntu:14.04",
+		Cmd:         []string{"bash"},
+		AttachStdin: true,
+		Tty:         true}
+	containerId, err := docker.CreateContainer(containerConfig, "foobar")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Start the container
+	hostConfig := &dockerclient.HostConfig{}
+	err = docker.StartContainer(containerId, hostConfig)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Stop the container (with a 5 second timeout)
+	docker.StopContainer(containerId, 5)
+
+	// Listen to events
+	docker.StartMonitorEvents(eventCallback, nil)
+
+	// Hold execution to watch incoming events
+	time.Sleep(3600 * time.Second)
+}
+```
+
+# Maintainers
+
+List of people you can ping for feedback on Pull Requests or any questions.
+
+- [Sam Alba](https://github.com/samalba)
+- [Michael Crosby](https://github.com/crosbymichael)
+- [Andrea Luzzardi](https://github.com/aluzzardi)
+- [Victor Vieux](https://github.com/vieux)
+- [Evan Hazlett](https://github.com/ehazlett)
+- [Donald Huang](https://github.com/donhcd)
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/auth.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/auth.go
new file mode 100644
index 00000000..48f5f90b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/auth.go
@@ -0,0 +1,38 @@
+package dockerclient
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+)
+
+// AuthConfig holds parameters for authenticating with the docker registry
+type AuthConfig struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	Email    string `json:"email,omitempty"`
+}
+
+// encode the auth configuration struct into base64 for the X-Registry-Auth header
+func (c *AuthConfig) encode() (string, error) {
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(c); err != nil {
+		return "", err
+	}
+	return base64.URLEncoding.EncodeToString(buf.Bytes()), nil
+}
+
+// ConfigFile holds parameters for authenticating during a BuildImage request
+type ConfigFile struct {
+	Configs  map[string]AuthConfig `json:"configs,omitempty"`
+	rootPath string
+}
+
+// encode the configuration struct into base64 for the X-Registry-Config header
+func (c *ConfigFile) encode() (string, error) {
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(c); err != nil {
+		return "", err
+	}
+	return base64.URLEncoding.EncodeToString(buf.Bytes()), nil
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/auth_test.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/auth_test.go
new file mode 100644
index 00000000..99801b22
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/auth_test.go
@@ -0,0 +1,15 @@
+package dockerclient
+
+import (
+	"testing"
+)
+
+func TestAuthEncode(t *testing.T) {
+	a := AuthConfig{Username: "foo", Password: "password", Email: "bar@baz.com"}
+	expected := "eyJ1c2VybmFtZSI6ImZvbyIsInBhc3N3b3JkIjoicGFzc3dvcmQiLCJlbWFpbCI6ImJhckBiYXouY29tIn0K"
+	got, _ := a.encode()
+
+	if expected != got {
+		t.Errorf("testAuthEncode failed.
Expected [%s] got [%s]", expected, got) + } +} diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient.go new file mode 100644 index 00000000..d8b11f4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient.go @@ -0,0 +1,714 @@ +package dockerclient + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "sync/atomic" + "time" +) + +const ( + APIVersion = "v1.15" +) + +var ( + ErrNotFound = errors.New("Not found") + + defaultTimeout = 30 * time.Second +) + +type DockerClient struct { + URL *url.URL + HTTPClient *http.Client + TLSConfig *tls.Config + monitorStats int32 + eventStopChan chan (struct{}) +} + +type Error struct { + StatusCode int + Status string + msg string +} + +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Status, e.msg) +} + +func NewDockerClient(daemonUrl string, tlsConfig *tls.Config) (*DockerClient, error) { + return NewDockerClientTimeout(daemonUrl, tlsConfig, time.Duration(defaultTimeout)) +} + +func NewDockerClientTimeout(daemonUrl string, tlsConfig *tls.Config, timeout time.Duration) (*DockerClient, error) { + u, err := url.Parse(daemonUrl) + if err != nil { + return nil, err + } + if u.Scheme == "" || u.Scheme == "tcp" { + if tlsConfig == nil { + u.Scheme = "http" + } else { + u.Scheme = "https" + } + } + httpClient := newHTTPClient(u, tlsConfig, timeout) + return &DockerClient{u, httpClient, tlsConfig, 0, nil}, nil +} + +func (client *DockerClient) doRequest(method string, path string, body []byte, headers map[string]string) ([]byte, error) { + b := bytes.NewBuffer(body) + + reader, err := client.doStreamRequest(method, path, b, headers) + if err != nil { + return nil, err + } + + defer reader.Close() + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + return data, nil +} + +func (client *DockerClient) doStreamRequest(method string, path string, in io.Reader, headers map[string]string) (io.ReadCloser, error) { + if (method == "POST" || method == "PUT") && in == nil { + in = bytes.NewReader(nil) + } + req, err := http.NewRequest(method, client.URL.String()+path, in) + if err != nil { + return nil, err + } + req.Header.Add("Content-Type", "application/json") + if headers != nil { + for header, value := range headers { + req.Header.Add(header, value) + } + } + resp, err := client.HTTPClient.Do(req) + if err != nil { + if !strings.Contains(err.Error(), "connection refused") && client.TLSConfig == nil { + return nil, fmt.Errorf("%v. 
Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + return nil, err + } + if resp.StatusCode == 404 { + return nil, ErrNotFound + } + if resp.StatusCode >= 400 { + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, Error{StatusCode: resp.StatusCode, Status: resp.Status, msg: string(data)} + } + + return resp.Body, nil +} + +func (client *DockerClient) Info() (*Info, error) { + uri := fmt.Sprintf("/%s/info", APIVersion) + data, err := client.doRequest("GET", uri, nil, nil) + if err != nil { + return nil, err + } + ret := &Info{} + err = json.Unmarshal(data, &ret) + if err != nil { + return nil, err + } + return ret, nil +} + +func (client *DockerClient) ListContainers(all bool, size bool, filters string) ([]Container, error) { + argAll := 0 + if all == true { + argAll = 1 + } + showSize := 0 + if size == true { + showSize = 1 + } + uri := fmt.Sprintf("/%s/containers/json?all=%d&size=%d", APIVersion, argAll, showSize) + + if filters != "" { + uri += "&filters=" + filters + } + + data, err := client.doRequest("GET", uri, nil, nil) + if err != nil { + return nil, err + } + ret := []Container{} + err = json.Unmarshal(data, &ret) + if err != nil { + return nil, err + } + return ret, nil +} + +func (client *DockerClient) InspectContainer(id string) (*ContainerInfo, error) { + uri := fmt.Sprintf("/%s/containers/%s/json", APIVersion, id) + data, err := client.doRequest("GET", uri, nil, nil) + if err != nil { + return nil, err + } + info := &ContainerInfo{} + err = json.Unmarshal(data, info) + if err != nil { + return nil, err + } + return info, nil +} + +func (client *DockerClient) CreateContainer(config *ContainerConfig, name string) (string, error) { + data, err := json.Marshal(config) + if err != nil { + return "", err + } + uri := fmt.Sprintf("/%s/containers/create", APIVersion) + if name != "" { + v := url.Values{} + v.Set("name", name) + uri = fmt.Sprintf("%s?%s", uri, v.Encode()) + } + data, err = client.doRequest("POST", uri, data, nil) + if err != nil { + return "", err + } + result := &RespContainersCreate{} + err = json.Unmarshal(data, result) + if err != nil { + return "", err + } + return result.Id, nil +} + +func (client *DockerClient) ContainerLogs(id string, options *LogOptions) (io.ReadCloser, error) { + v := url.Values{} + v.Add("follow", strconv.FormatBool(options.Follow)) + v.Add("stdout", strconv.FormatBool(options.Stdout)) + v.Add("stderr", strconv.FormatBool(options.Stderr)) + v.Add("timestamps", strconv.FormatBool(options.Timestamps)) + if options.Tail > 0 { + v.Add("tail", strconv.FormatInt(options.Tail, 10)) + } + + uri := fmt.Sprintf("/%s/containers/%s/logs?%s", APIVersion, id, v.Encode()) + req, err := http.NewRequest("GET", client.URL.String()+uri, nil) + if err != nil { + return nil, err + } + req.Header.Add("Content-Type", "application/json") + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +func (client *DockerClient) ContainerChanges(id string) ([]*ContainerChanges, error) { + uri := fmt.Sprintf("/%s/containers/%s/changes", APIVersion, id) + data, err := client.doRequest("GET", uri, nil, nil) + if err != nil { + return nil, err + } + changes := []*ContainerChanges{} + err = json.Unmarshal(data, &changes) + if err != nil { + return nil, err + } + return changes, nil +} + +func (client *DockerClient) readJSONStream(stream io.ReadCloser, decode func(*json.Decoder) decodingResult, stopChan <-chan struct{}) <-chan 
decodingResult { + resultChan := make(chan decodingResult) + + go func() { + decodeChan := make(chan decodingResult) + + go func() { + decoder := json.NewDecoder(stream) + for { + decodeResult := decode(decoder) + decodeChan <- decodeResult + if decodeResult.err != nil { + close(decodeChan) + return + } + } + }() + + defer close(resultChan) + + for { + select { + case <-stopChan: + stream.Close() + for range decodeChan { + } + return + case decodeResult := <-decodeChan: + resultChan <- decodeResult + if decodeResult.err != nil { + stream.Close() + return + } + } + } + + }() + + return resultChan +} + +func (client *DockerClient) StartContainer(id string, config *HostConfig) error { + data, err := json.Marshal(config) + if err != nil { + return err + } + uri := fmt.Sprintf("/%s/containers/%s/start", APIVersion, id) + _, err = client.doRequest("POST", uri, data, nil) + if err != nil { + return err + } + return nil +} + +func (client *DockerClient) StopContainer(id string, timeout int) error { + uri := fmt.Sprintf("/%s/containers/%s/stop?t=%d", APIVersion, id, timeout) + _, err := client.doRequest("POST", uri, nil, nil) + if err != nil { + return err + } + return nil +} + +func (client *DockerClient) RestartContainer(id string, timeout int) error { + uri := fmt.Sprintf("/%s/containers/%s/restart?t=%d", APIVersion, id, timeout) + _, err := client.doRequest("POST", uri, nil, nil) + if err != nil { + return err + } + return nil +} + +func (client *DockerClient) KillContainer(id, signal string) error { + uri := fmt.Sprintf("/%s/containers/%s/kill?signal=%s", APIVersion, id, signal) + _, err := client.doRequest("POST", uri, nil, nil) + if err != nil { + return err + } + return nil +} + +func (client *DockerClient) Wait(id string) <-chan WaitResult { + ch := make(chan WaitResult) + uri := fmt.Sprintf("/%s/containers/%s/wait", APIVersion, id) + + go func() { + data, err := client.doRequest("POST", uri, nil, nil) + if err != nil { + ch <- WaitResult{ExitCode: -1, Error: err} + return + } + + var result struct { + StatusCode int `json:"StatusCode"` + } + err = json.Unmarshal(data, &result) + ch <- WaitResult{ExitCode: result.StatusCode, Error: err} + }() + return ch +} + +func (client *DockerClient) MonitorEvents(options *MonitorEventsOptions, stopChan <-chan struct{}) (<-chan EventOrError, error) { + v := url.Values{} + if options != nil { + if options.Since != 0 { + v.Add("since", strconv.Itoa(options.Since)) + } + if options.Until != 0 { + v.Add("until", strconv.Itoa(options.Until)) + } + if options.Filters != nil { + filterMap := make(map[string][]string) + if len(options.Filters.Event) > 0 { + filterMap["event"] = []string{options.Filters.Event} + } + if len(options.Filters.Image) > 0 { + filterMap["image"] = []string{options.Filters.Image} + } + if len(options.Filters.Container) > 0 { + filterMap["container"] = []string{options.Filters.Container} + } + if len(filterMap) > 0 { + filterJSONBytes, err := json.Marshal(filterMap) + if err != nil { + return nil, err + } + v.Add("filters", string(filterJSONBytes)) + } + } + } + uri := fmt.Sprintf("%s/%s/events?%s", client.URL.String(), APIVersion, v.Encode()) + resp, err := client.HTTPClient.Get(uri) + if err != nil { + return nil, err + } + + decode := func(decoder *json.Decoder) decodingResult { + var event Event + if err := decoder.Decode(&event); err != nil { + return decodingResult{err: err} + } else { + return decodingResult{result: event} + } + } + decodingResultChan := client.readJSONStream(resp.Body, decode, stopChan) + eventOrErrorChan := 
make(chan EventOrError)
+	go func() {
+		for decodingResult := range decodingResultChan {
+			event, _ := decodingResult.result.(Event)
+			eventOrErrorChan <- EventOrError{
+				Event: event,
+				Error: decodingResult.err,
+			}
+		}
+		close(eventOrErrorChan)
+	}()
+	return eventOrErrorChan, nil
+}
+
+func (client *DockerClient) StartMonitorEvents(cb Callback, ec chan error, args ...interface{}) {
+	client.eventStopChan = make(chan struct{})
+
+	go func() {
+		eventErrChan, err := client.MonitorEvents(nil, client.eventStopChan)
+		if err != nil {
+			if ec != nil {
+				ec <- err
+			}
+			return
+		}
+
+		for e := range eventErrChan {
+			if e.Error != nil {
+				if ec != nil {
+					ec <- e.Error
+				}
+				return
+			}
+			cb(&e.Event, ec, args...)
+		}
+	}()
+}
+
+func (client *DockerClient) StopAllMonitorEvents() {
+	close(client.eventStopChan)
+}
+
+func (client *DockerClient) StartMonitorStats(id string, cb StatCallback, ec chan error, args ...interface{}) {
+	atomic.StoreInt32(&client.monitorStats, 1)
+	go client.getStats(id, cb, ec, args...)
+}
+
+func (client *DockerClient) getStats(id string, cb StatCallback, ec chan error, args ...interface{}) {
+	uri := fmt.Sprintf("%s/%s/containers/%s/stats", client.URL.String(), APIVersion, id)
+	resp, err := client.HTTPClient.Get(uri)
+	if err != nil {
+		ec <- err
+		return
+	}
+	defer resp.Body.Close()
+
+	dec := json.NewDecoder(resp.Body)
+	for atomic.LoadInt32(&client.monitorStats) > 0 {
+		var stats *Stats
+		if err := dec.Decode(&stats); err != nil {
+			ec <- err
+			return
+		}
+		cb(id, stats, ec, args...)
+	}
+}
+
+func (client *DockerClient) StopAllMonitorStats() {
+	atomic.StoreInt32(&client.monitorStats, 0)
+}
+
+func (client *DockerClient) TagImage(nameOrID string, repo string, tag string, force bool) error {
+	v := url.Values{}
+	v.Set("repo", repo)
+	v.Set("tag", tag)
+	if force {
+		v.Set("force", "1")
+	}
+	uri := fmt.Sprintf("/%s/images/%s/tag?%s", APIVersion, nameOrID, v.Encode())
+	if _, err := client.doRequest("POST", uri, nil, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (client *DockerClient) Version() (*Version, error) {
+	uri := fmt.Sprintf("/%s/version", APIVersion)
+	data, err := client.doRequest("GET", uri, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	version := &Version{}
+	err = json.Unmarshal(data, version)
+	if err != nil {
+		return nil, err
+	}
+	return version, nil
+}
+
+func (client *DockerClient) PullImage(name string, auth *AuthConfig) error {
+	v := url.Values{}
+	v.Set("fromImage", name)
+	uri := fmt.Sprintf("/%s/images/create?%s", APIVersion, v.Encode())
+	req, err := http.NewRequest("POST", client.URL.String()+uri, nil)
+	if auth != nil {
+		encoded_auth, err := auth.encode()
+		if err != nil {
+			return err
+		}
+		req.Header.Add("X-Registry-Auth", encoded_auth)
+	}
+	resp, err := client.HTTPClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	if resp.StatusCode == 404 {
+		return ErrNotFound
+	}
+	if resp.StatusCode >= 400 {
+		data, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+		return fmt.Errorf("%s", string(data))
+	}
+
+	var finalObj map[string]interface{}
+	for decoder := json.NewDecoder(resp.Body); err == nil; err = decoder.Decode(&finalObj) {
+	}
+	if err != io.EOF {
+		return err
+	}
+	if err, ok := finalObj["error"]; ok {
+		return fmt.Errorf("%v", err)
+	}
+	return nil
+}
+
+func (client *DockerClient) InspectImage(id string) (*ImageInfo, error) {
+	uri := fmt.Sprintf("/%s/images/%s/json", APIVersion, id)
+	data, err := client.doRequest("GET", uri, nil, nil)
+	if err != nil {
return nil, err + } + info := &ImageInfo{} + err = json.Unmarshal(data, info) + if err != nil { + return nil, err + } + return info, nil +} + +func (client *DockerClient) LoadImage(reader io.Reader) error { + data, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + uri := fmt.Sprintf("/%s/images/load", APIVersion) + _, err = client.doRequest("POST", uri, data, nil) + if err != nil { + return err + } + return nil +} + +func (client *DockerClient) RemoveContainer(id string, force, volumes bool) error { + argForce := 0 + argVolumes := 0 + if force == true { + argForce = 1 + } + if volumes == true { + argVolumes = 1 + } + args := fmt.Sprintf("force=%d&v=%d", argForce, argVolumes) + uri := fmt.Sprintf("/%s/containers/%s?%s", APIVersion, id, args) + _, err := client.doRequest("DELETE", uri, nil, nil) + return err +} + +func (client *DockerClient) ListImages(all bool) ([]*Image, error) { + argAll := 0 + if all { + argAll = 1 + } + uri := fmt.Sprintf("/%s/images/json?all=%d", APIVersion, argAll) + data, err := client.doRequest("GET", uri, nil, nil) + if err != nil { + return nil, err + } + var images []*Image + if err := json.Unmarshal(data, &images); err != nil { + return nil, err + } + return images, nil +} + +func (client *DockerClient) RemoveImage(name string) ([]*ImageDelete, error) { + uri := fmt.Sprintf("/%s/images/%s", APIVersion, name) + data, err := client.doRequest("DELETE", uri, nil, nil) + if err != nil { + return nil, err + } + var imageDelete []*ImageDelete + if err := json.Unmarshal(data, &imageDelete); err != nil { + return nil, err + } + return imageDelete, nil +} + +func (client *DockerClient) PauseContainer(id string) error { + uri := fmt.Sprintf("/%s/containers/%s/pause", APIVersion, id) + _, err := client.doRequest("POST", uri, nil, nil) + if err != nil { + return err + } + return nil +} +func (client *DockerClient) UnpauseContainer(id string) error { + uri := fmt.Sprintf("/%s/containers/%s/unpause", APIVersion, id) + _, err := client.doRequest("POST", uri, nil, nil) + if err != nil { + return err + } + return nil +} + +func (client *DockerClient) Exec(config *ExecConfig) (string, error) { + data, err := json.Marshal(config) + if err != nil { + return "", err + } + uri := fmt.Sprintf("/containers/%s/exec", config.Container) + resp, err := client.doRequest("POST", uri, data, nil) + if err != nil { + return "", err + } + var createExecResp struct { + Id string + } + if err = json.Unmarshal(resp, &createExecResp); err != nil { + return "", err + } + uri = fmt.Sprintf("/exec/%s/start", createExecResp.Id) + resp, err = client.doRequest("POST", uri, data, nil) + if err != nil { + return "", err + } + return createExecResp.Id, nil +} + +func (client *DockerClient) RenameContainer(oldName string, newName string) error { + uri := fmt.Sprintf("/containers/%s/rename?name=%s", oldName, newName) + _, err := client.doRequest("POST", uri, nil, nil) + return err +} + +func (client *DockerClient) ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error) { + var fromSrc string + v := &url.Values{} + if source == "" { + fromSrc = "-" + } else { + fromSrc = source + } + + v.Set("fromSrc", fromSrc) + v.Set("repo", repository) + if tag != "" { + v.Set("tag", tag) + } + + var in io.Reader + if fromSrc == "-" { + in = tar + } + return client.doStreamRequest("POST", "/images/create?"+v.Encode(), in, nil) +} + +func (client *DockerClient) BuildImage(image *BuildImage) (io.ReadCloser, error) { + v := url.Values{} + + if image.DockerfileName != "" 
{
+		v.Set("dockerfile", image.DockerfileName)
+	}
+	if image.RepoName != "" {
+		v.Set("t", image.RepoName)
+	}
+	if image.RemoteURL != "" {
+		v.Set("remote", image.RemoteURL)
+	}
+	if image.NoCache {
+		v.Set("nocache", "1")
+	}
+	if image.Pull {
+		v.Set("pull", "1")
+	}
+	if image.Remove {
+		v.Set("rm", "1")
+	} else {
+		v.Set("rm", "0")
+	}
+	if image.ForceRemove {
+		v.Set("forcerm", "1")
+	}
+	if image.SuppressOutput {
+		v.Set("q", "1")
+	}
+
+	v.Set("memory", strconv.FormatInt(image.Memory, 10))
+	v.Set("memswap", strconv.FormatInt(image.MemorySwap, 10))
+	v.Set("cpushares", strconv.FormatInt(image.CpuShares, 10))
+	v.Set("cpuperiod", strconv.FormatInt(image.CpuPeriod, 10))
+	v.Set("cpuquota", strconv.FormatInt(image.CpuQuota, 10))
+	v.Set("cpusetcpus", image.CpuSetCpus)
+	v.Set("cpusetmems", image.CpuSetMems)
+	v.Set("cgroupparent", image.CgroupParent)
+
+	headers := make(map[string]string)
+	if image.Config != nil {
+		encoded_config, err := image.Config.encode()
+		if err != nil {
+			return nil, err
+		}
+		headers["X-Registry-Config"] = encoded_config
+	}
+	if image.Context != nil {
+		headers["Content-Type"] = "application/tar"
+	}
+
+	uri := fmt.Sprintf("/%s/build?%s", APIVersion, v.Encode())
+	return client.doStreamRequest("POST", uri, image.Context, headers)
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient_test.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient_test.go
new file mode 100644
index 00000000..88257e01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/dockerclient_test.go
@@ -0,0 +1,240 @@
+package dockerclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/stdcopy"
+)
+
+func assertEqual(t *testing.T, a interface{}, b interface{}, message string) {
+	if a == b {
+		return
+	}
+	if len(message) == 0 {
+		message = fmt.Sprintf("%v != %v", a, b)
+	}
+	t.Fatal(message)
+}
+
+func testDockerClient(t *testing.T) *DockerClient {
+	client, err := NewDockerClient(testHTTPServer.URL, nil)
+	if err != nil {
+		t.Fatal("Cannot init the docker client")
+	}
+	return client
+}
+
+func TestInfo(t *testing.T) {
+	client := testDockerClient(t)
+	info, err := client.Info()
+	if err != nil {
+		t.Fatal("Cannot get server info")
+	}
+	assertEqual(t, info.Images, int64(1), "")
+	assertEqual(t, info.Containers, int64(2), "")
+}
+
+func TestKillContainer(t *testing.T) {
+	client := testDockerClient(t)
+	if err := client.KillContainer("23132acf2ac", "5"); err != nil {
+		t.Fatalf("cannot kill container: %s", err)
+	}
+}
+
+func TestWait(t *testing.T) {
+	client := testDockerClient(t)
+
+	// This provokes an error on the server.
+	select {
+	case wr := <-client.Wait("1234"):
+		assertEqual(t, wr.ExitCode, int(-1), "")
+	case <-time.After(2 * time.Second):
+		t.Fatal("Timed out!")
+	}
+
+	// Valid case.
+	select {
+	case wr := <-client.Wait("valid-id"):
+		assertEqual(t, wr.ExitCode, int(0), "")
+	case <-time.After(2 * time.Second):
+		t.Fatal("Timed out!")
+	}
+}
+
+func TestPullImage(t *testing.T) {
+	client := testDockerClient(t)
+	err := client.PullImage("busybox", nil)
+	if err != nil {
+		t.Fatal("unable to pull busybox")
+	}
+
+	err = client.PullImage("haproxy", nil)
+	if err != nil {
+		t.Fatal("unable to pull haproxy")
+	}
+
+	err = client.PullImage("wrongimg", nil)
+	if err == nil {
+		t.Fatal("should return error when it fails to pull wrongimg")
+	}
+}
+
+func TestListContainers(t *testing.T) {
+	client := testDockerClient(t)
+	containers, err := client.ListContainers(true, false, "")
+	if err != nil {
+		t.Fatalf("cannot get containers: %s", err)
+	}
+	assertEqual(t, len(containers), 1, "")
+	cnt := containers[0]
+	assertEqual(t, cnt.SizeRw, int64(0), "")
+}
+
+func TestContainerChanges(t *testing.T) {
+	client := testDockerClient(t)
+	changes, err := client.ContainerChanges("foobar")
+	if err != nil {
+		t.Fatalf("cannot get container changes: %s", err)
+	}
+	assertEqual(t, len(changes), 3, "unexpected number of changes")
+	c := changes[0]
+	assertEqual(t, c.Path, "/dev", "unexpected")
+	assertEqual(t, c.Kind, 0, "unexpected")
+}
+
+func TestListContainersWithSize(t *testing.T) {
+	client := testDockerClient(t)
+	containers, err := client.ListContainers(true, true, "")
+	if err != nil {
+		t.Fatalf("cannot get containers: %s", err)
+	}
+	assertEqual(t, len(containers), 1, "")
+	cnt := containers[0]
+	assertEqual(t, cnt.SizeRw, int64(123), "")
+}
+func TestListContainersWithFilters(t *testing.T) {
+	client := testDockerClient(t)
+	containers, err := client.ListContainers(true, true, "{'id':['332375cfbc23edb921a21026314c3497674ba8bdcb2c85e0e65ebf2017f688ce']}")
+	if err != nil {
+		t.Fatalf("cannot get containers: %s", err)
+	}
+	assertEqual(t, len(containers), 1, "")
+
+	containers, err = client.ListContainers(true, true, "{'id':['332375cfbc23edb921a21026314c3497674ba8bdcb2c85e0e65ebf2017f688cf']}")
+	if err != nil {
+		t.Fatalf("cannot get containers: %s", err)
+	}
+	assertEqual(t, len(containers), 0, "")
+}
+
+func TestContainerLogs(t *testing.T) {
+	client := testDockerClient(t)
+	containerId := "foobar"
+	logOptions := &LogOptions{
+		Follow:     true,
+		Stdout:     true,
+		Stderr:     true,
+		Timestamps: true,
+		Tail:       10,
+	}
+	logsReader, err := client.ContainerLogs(containerId, logOptions)
+	if err != nil {
+		t.Fatal("cannot read logs from server")
+	}
+
+	stdoutBuffer := new(bytes.Buffer)
+	stderrBuffer := new(bytes.Buffer)
+	if _, err = stdcopy.StdCopy(stdoutBuffer, stderrBuffer, logsReader); err != nil {
+		t.Fatal("cannot read logs from logs reader")
+	}
+	stdoutLogs := strings.TrimSpace(stdoutBuffer.String())
+	stderrLogs := strings.TrimSpace(stderrBuffer.String())
+	stdoutLogLines := strings.Split(stdoutLogs, "\n")
+	stderrLogLines := strings.Split(stderrLogs, "\n")
+	if len(stdoutLogLines) != 5 {
+		t.Fatalf("wrong number of stdout logs: len=%d", len(stdoutLogLines))
+	}
+	if len(stderrLogLines) != 5 {
+		t.Fatalf("wrong number of stderr logs: len=%d", len(stderrLogLines))
+	}
+	for i, line := range stdoutLogLines {
+		expectedSuffix := fmt.Sprintf("Z line %d", 41+2*i)
+		if !strings.HasSuffix(line, expectedSuffix) {
+			t.Fatalf("expected stdout log line \"%s\" to end with \"%s\"", line, expectedSuffix)
+		}
+	}
+	for i, line := range stderrLogLines {
+		expectedSuffix := fmt.Sprintf("Z line %d", 40+2*i)
+		if !strings.HasSuffix(line, expectedSuffix) {
+			t.Fatalf("expected stderr log line \"%s\" to
end with \"%s\"", line, expectedSuffix) + } + } +} + +func TestMonitorEvents(t *testing.T) { + client := testDockerClient(t) + decoder := json.NewDecoder(bytes.NewBufferString(eventsResp)) + var expectedEvents []Event + for { + var event Event + if err := decoder.Decode(&event); err != nil { + if err == io.EOF { + break + } else { + t.Fatalf("cannot parse expected resp: %s", err.Error()) + } + } else { + expectedEvents = append(expectedEvents, event) + } + } + + // test passing stop chan + stopChan := make(chan struct{}) + eventInfoChan, err := client.MonitorEvents(nil, stopChan) + if err != nil { + t.Fatalf("cannot get events from server: %s", err.Error()) + } + + eventInfo := <-eventInfoChan + if eventInfo.Error != nil || eventInfo.Event != expectedEvents[0] { + t.Fatalf("got:\n%#v\nexpected:\n%#v", eventInfo, expectedEvents[0]) + } + close(stopChan) + for i := 0; i < 3; i++ { + _, ok := <-eventInfoChan + if i == 2 && ok { + t.Fatalf("read more than 2 events successfully after closing stopChan") + } + } + + // test when you don't pass stop chan + eventInfoChan, err = client.MonitorEvents(nil, nil) + if err != nil { + t.Fatalf("cannot get events from server: %s", err.Error()) + } + + for i, expectedEvent := range expectedEvents { + t.Logf("on iter %d\n", i) + eventInfo := <-eventInfoChan + if eventInfo.Error != nil || eventInfo.Event != expectedEvent { + t.Fatalf("index %d, got:\n%#v\nexpected:\n%#v", i, eventInfo, expectedEvent) + } + t.Logf("done with iter %d\n", i) + } +} + +func TestDockerClientInterface(t *testing.T) { + iface := reflect.TypeOf((*Client)(nil)).Elem() + test := testDockerClient(t) + + if !reflect.TypeOf(test).Implements(iface) { + t.Fatalf("DockerClient does not implement the Client interface") + } +} diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/engine_mock_test.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/engine_mock_test.go new file mode 100644 index 00000000..7d3a6d93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/engine_mock_test.go @@ -0,0 +1,245 @@ +package dockerclient + +import ( + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "strconv" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/timeutils" + "github.com/gorilla/mux" +) + +var ( + testHTTPServer *httptest.Server +) + +func init() { + r := mux.NewRouter() + baseURL := "/" + APIVersion + r.HandleFunc(baseURL+"/info", handlerGetInfo).Methods("GET") + r.HandleFunc(baseURL+"/containers/json", handlerGetContainers).Methods("GET") + r.HandleFunc(baseURL+"/containers/{id}/logs", handleContainerLogs).Methods("GET") + r.HandleFunc(baseURL+"/containers/{id}/changes", handleContainerChanges).Methods("GET") + r.HandleFunc(baseURL+"/containers/{id}/kill", handleContainerKill).Methods("POST") + r.HandleFunc(baseURL+"/containers/{id}/wait", handleWait).Methods("POST") + r.HandleFunc(baseURL+"/images/create", handleImagePull).Methods("POST") + r.HandleFunc(baseURL+"/events", handleEvents).Methods("GET") + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + log.Printf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func handleContainerKill(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "{%q:%q", "Id", 
"421373210afd132") +} + +func handleWait(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + if vars["id"] == "valid-id" { + fmt.Fprintf(w, `{"StatusCode":0}`) + } else { + http.Error(w, "failed", 500) + } +} + +func handleImagePull(w http.ResponseWriter, r *http.Request) { + imageName := r.URL.Query()["fromImage"][0] + responses := []map[string]interface{}{{ + "status": fmt.Sprintf("Pulling repository mydockerregistry/%s", imageName), + }} + switch imageName { + case "busybox": + responses = append(responses, map[string]interface{}{ + "status": "Status: Image is up to date for mydockerregistry/busybox", + }) + case "haproxy": + fmt.Fprintf(w, haproxyPullOutput) + return + default: + errorMsg := fmt.Sprintf("Error: image %s not found", imageName) + responses = append(responses, map[string]interface{}{ + "errorDetail": map[string]interface{}{ + "message": errorMsg, + }, + "error": errorMsg, + }) + } + for _, response := range responses { + json.NewEncoder(w).Encode(response) + } +} + +func handleContainerLogs(w http.ResponseWriter, r *http.Request) { + var outStream, errStream io.Writer + outStream = ioutils.NewWriteFlusher(w) + + // not sure how to test follow + if err := r.ParseForm(); err != nil { + http.Error(w, err.Error(), 500) + } + stdout, stderr := getBoolValue(r.Form.Get("stdout")), getBoolValue(r.Form.Get("stderr")) + if stderr { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + } + if stdout { + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + var i int + if tail, err := strconv.Atoi(r.Form.Get("tail")); err == nil && tail > 0 { + i = 50 - tail + if i < 0 { + i = 0 + } + } + for ; i < 50; i++ { + line := fmt.Sprintf("line %d", i) + if getBoolValue(r.Form.Get("timestamps")) { + l := &jsonlog.JSONLog{Log: line, Created: time.Now().UTC()} + line = fmt.Sprintf("%s %s", l.Created.Format(timeutils.RFC3339NanoFixed), line) + } + if i%2 == 0 && stderr { + fmt.Fprintln(errStream, line) + } else if i%2 == 1 && stdout { + fmt.Fprintln(outStream, line) + } + } +} + +func handleContainerChanges(w http.ResponseWriter, r *http.Request) { + writeHeaders(w, 200, "changes") + body := `[ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ]` + w.Write([]byte(body)) +} + +func getBoolValue(boolString string) bool { + switch boolString { + case "1": + return true + case "True": + return true + case "true": + return true + default: + return false + } +} + +func writeHeaders(w http.ResponseWriter, code int, jobName string) { + h := w.Header() + h.Add("Content-Type", "application/json") + if jobName != "" { + h.Add("Job-Name", jobName) + } + w.WriteHeader(code) +} + +func handlerGetInfo(w http.ResponseWriter, r *http.Request) { + writeHeaders(w, 200, "info") + body := `{ + "Containers": 2, + "Debug": 1, + "Driver": "aufs", + "DriverStatus": [["Root Dir", "/mnt/sda1/var/lib/docker/aufs"], + ["Dirs", "0"]], + "ExecutionDriver": "native-0.2", + "IPv4Forwarding": 1, + "Images": 1, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/local/bin/docker", + "InitSha1": "", + "KernelVersion": "3.16.4-tinycore64", + "MemoryLimit": 1, + "NEventsListener": 0, + "NFd": 10, + "NGoroutines": 11, + "OperatingSystem": "Boot2Docker 1.3.1 (TCL 5.4); master : a083df4 - Thu Jan 01 00:00:00 UTC 1970", + "SwapLimit": 1}` + w.Write([]byte(body)) +} + +func handlerGetContainers(w http.ResponseWriter, r *http.Request) { + writeHeaders(w, 200, "containers") + body := `[ + { + "Status": "Up 39 
seconds", + "Ports": [ + { + "Type": "tcp", + "PublicPort": 49163, + "PrivatePort": 8080, + "IP": "0.0.0.0" + } + ], + "Names": [ + "/trusting_heisenberg" + ], + "Image": "foo:latest", + "Id": "332375cfbc23edb921a21026314c3497674ba8bdcb2c85e0e65ebf2017f688ce", + "Created": 1415720105, + "Command": "/bin/go-run" + } + ]` + if v, ok := r.URL.Query()["size"]; ok { + if v[0] == "1" { + body = `[ + { + "Status": "Up 39 seconds", + "Ports": [ + { + "Type": "tcp", + "PublicPort": 49163, + "PrivatePort": 8080, + "IP": "0.0.0.0" + } + ], + "Names": [ + "/trusting_heisenberg" + ], + "Image": "foo:latest", + "Id": "332375cfbc23edb921a21026314c3497674ba8bdcb2c85e0e65ebf2017f688ce", + "Created": 1415720105, + "SizeRootFs": 12345, + "SizeRW": 123, + "Command": "/bin/go-run" + } + ]` + } + } + if v, ok := r.URL.Query()["filters"]; ok { + if v[0] != "{'id':['332375cfbc23edb921a21026314c3497674ba8bdcb2c85e0e65ebf2017f688ce']}" { + body = "[]" + } + } + w.Write([]byte(body)) +} + +func handleEvents(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(eventsResp)) +} diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/example_responses.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/example_responses.go new file mode 100644 index 00000000..670508c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/example_responses.go @@ -0,0 +1,13 @@ +package dockerclient + +var haproxyPullOutput = `{"status":"The image you are pulling has been verified","id":"haproxy:1"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"ecb4b23ca7ce"}{"status":"Already exists","progressDetail":{},"id":"f453e940c177"}{"status":"Already exists","progressDetail":{},"id":"fc5ea1bc05ab"}{"status":"Already exists","progressDetail":{},"id":"380557f8f7b3"}{"status":"The image you are pulling has been verified","id":"haproxy:1.4"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"63a1b9929e14"}{"status":"Already exists","progressDetail":{},"id":"af43bf7d176e"}{"status":"Already exists","progressDetail":{},"id":"851aac2d69aa"}{"status":"Already exists","progressDetail":{},"id":"345053a92c95"}{"status":"Already exists","progressDetail":{},"id":"b41231d429c9"}{"status":"The image you are pulling has been verified","id":"haproxy:1.4.25"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"63a1b9929e14"}{"status":"Already exists","progressDetail":{},"id":"af43bf7d176e"}{"status":"Already exists","progressDetail":{},"id":"851aac2d69aa"}{"status":"Already exists","progressDetail":{},"id":"345053a92c95"}{"status":"Already exists","progressDetail":{},"id":"b41231d429c9"}{"status":"The image you are pulling has been verified","id":"haproxy:1.5"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already 
exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"ecb4b23ca7ce"}{"status":"Already exists","progressDetail":{},"id":"f453e940c177"}{"status":"Already exists","progressDetail":{},"id":"fc5ea1bc05ab"}{"status":"Already exists","progressDetail":{},"id":"380557f8f7b3"}{"status":"The image you are pulling has been verified","id":"haproxy:1.5.10"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"ecb4b23ca7ce"}{"status":"Already exists","progressDetail":{},"id":"f453e940c177"}{"status":"Already exists","progressDetail":{},"id":"fc5ea1bc05ab"}{"status":"Already exists","progressDetail":{},"id":"380557f8f7b3"}{"status":"The image you are pulling has been verified","id":"haproxy:1.5.9"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"3d894e6f7e63"}{"status":"Already exists","progressDetail":{},"id":"4d949c40bc77"}{"status":"Already exists","progressDetail":{},"id":"55e031889365"}{"status":"Already exists","progressDetail":{},"id":"c7aa675e1876"}{"status":"The image you are pulling has been verified","id":"haproxy:latest"} +{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"ecb4b23ca7ce"}{"status":"Already exists","progressDetail":{},"id":"f453e940c177"}{"status":"Already exists","progressDetail":{},"id":"fc5ea1bc05ab"}{"status":"Already exists","progressDetail":{},"id":"380557f8f7b3"}{"status":"Status: Image is up to date for haproxy"} +` + +var eventsResp = 
`{"status":"pull","id":"nginx:latest","time":1428620433}{"status":"create","id":"9b818c3b8291708fdcecd7c4086b75c222cb503be10a93d9c11040886032a48b","from":"nginx:latest","time":1428620433}{"status":"start","id":"9b818c3b8291708fdcecd7c4086b75c222cb503be10a93d9c11040886032a48b","from":"nginx:latest","time":1428620433}{"status":"die","id":"9b818c3b8291708fdcecd7c4086b75c222cb503be10a93d9c11040886032a48b","from":"nginx:latest","time":1428620442}{"status":"create","id":"352d0b412aae5a5d2b14ae9d88be59dc276602d9edb9dcc33e138e475b3e4720","from":"52.11.96.81/foobar/ubuntu:latest","time":1428620444}{"status":"start","id":"352d0b412aae5a5d2b14ae9d88be59dc276602d9edb9dcc33e138e475b3e4720","from":"52.11.96.81/foobar/ubuntu:latest","time":1428620444}{"status":"die","id":"352d0b412aae5a5d2b14ae9d88be59dc276602d9edb9dcc33e138e475b3e4720","from":"52.11.96.81/foobar/ubuntu:latest","time":1428620444}{"status":"pull","id":"debian:latest","time":1428620453}{"status":"create","id":"668887b5729946546b3072655dc6da08f0e3210111b68b704eb842adfce53f6c","from":"debian:latest","time":1428620453}{"status":"start","id":"668887b5729946546b3072655dc6da08f0e3210111b68b704eb842adfce53f6c","from":"debian:latest","time":1428620453}{"status":"die","id":"668887b5729946546b3072655dc6da08f0e3210111b68b704eb842adfce53f6c","from":"debian:latest","time":1428620453}{"status":"create","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620458}{"status":"start","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620458}{"status":"pause","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620462}{"status":"unpause","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620466}{"status":"die","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620469}` diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/examples/events.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/examples/events.go new file mode 100644 index 00000000..2d6de40c --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/examples/events.go @@ -0,0 +1,39 @@ +package main + +import ( + "github.com/samalba/dockerclient" + "log" + "os" + "os/signal" + "syscall" +) + +func eventCallback(e *dockerclient.Event, ec chan error, args ...interface{}) { + log.Println(e) +} + +var ( + client *dockerclient.DockerClient +) + +func waitForInterrupt() { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) + for _ = range sigChan { + client.StopAllMonitorEvents() + os.Exit(0) + } +} + +func main() { + docker, err := dockerclient.NewDockerClient(os.Getenv("DOCKER_HOST"), nil) + if err != nil { + log.Fatal(err) + } + + client = docker + + client.StartMonitorEvents(eventCallback, nil) + + waitForInterrupt() +} diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/examples/stats/stats.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/examples/stats/stats.go new file mode 100644 index 00000000..9027069d --- /dev/null +++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/examples/stats/stats.go @@ -0,0 +1,43 @@ +package main + +import ( + "github.com/samalba/dockerclient" + "log" + "os" + "os/signal" + "syscall" +) + +func statCallback(id string, stat *dockerclient.Stats, ec chan error, args ...interface{}) { + 
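+	// Invoked for each stats message the daemon streams back; the trailing
+	// args presumably carry any extra values passed to StartMonitorStats
+	// (none in this example).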
+	log.Println(stat)
+}
+
+func waitForInterrupt() {
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
+	for _ = range sigChan {
+		os.Exit(0)
+	}
+}
+
+func main() {
+	docker, err := dockerclient.NewDockerClient(os.Getenv("DOCKER_HOST"), nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	containerConfig := &dockerclient.ContainerConfig{Image: "busybox", Cmd: []string{"sh"}}
+	containerId, err := docker.CreateContainer(containerConfig, "")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Start the container
+	err = docker.StartContainer(containerId, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	docker.StartMonitorStats(containerId, statCallback, nil)
+
+	waitForInterrupt()
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/interface.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/interface.go
new file mode 100644
index 00000000..8a488119
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/interface.go
@@ -0,0 +1,46 @@
+package dockerclient
+
+import (
+	"io"
+)
+
+type Callback func(*Event, chan error, ...interface{})
+
+type StatCallback func(string, *Stats, chan error, ...interface{})
+
+type Client interface {
+	Info() (*Info, error)
+	ListContainers(all, size bool, filters string) ([]Container, error)
+	InspectContainer(id string) (*ContainerInfo, error)
+	InspectImage(id string) (*ImageInfo, error)
+	CreateContainer(config *ContainerConfig, name string) (string, error)
+	ContainerLogs(id string, options *LogOptions) (io.ReadCloser, error)
+	ContainerChanges(id string) ([]*ContainerChanges, error)
+	Exec(config *ExecConfig) (string, error)
+	StartContainer(id string, config *HostConfig) error
+	StopContainer(id string, timeout int) error
+	RestartContainer(id string, timeout int) error
+	KillContainer(id, signal string) error
+	Wait(id string) <-chan WaitResult
+	// MonitorEvents takes options and an optional stop channel, and returns
+	// an EventOrError channel. If an error is ever sent, then no more
+	// events will be sent. If a stop channel is provided, events will stop
+	// being monitored after the stop channel is closed.
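+	// A minimal usage sketch (hypothetical caller; client construction and
+	// error handling are elided):
+	//
+	//	stop := make(chan struct{})
+	//	defer close(stop)
+	//	events, err := client.MonitorEvents(nil, stop)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	for eo := range events {
+	//		if eo.Error != nil {
+	//			break
+	//		}
+	//		log.Println(eo.Event)
+	//	}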
+	MonitorEvents(options *MonitorEventsOptions, stopChan <-chan struct{}) (<-chan EventOrError, error)
+	StartMonitorEvents(cb Callback, ec chan error, args ...interface{})
+	StopAllMonitorEvents()
+	StartMonitorStats(id string, cb StatCallback, ec chan error, args ...interface{})
+	StopAllMonitorStats()
+	TagImage(nameOrID string, repo string, tag string, force bool) error
+	Version() (*Version, error)
+	PullImage(name string, auth *AuthConfig) error
+	LoadImage(reader io.Reader) error
+	RemoveContainer(id string, force, volumes bool) error
+	ListImages(all bool) ([]*Image, error)
+	RemoveImage(name string) ([]*ImageDelete, error)
+	PauseContainer(name string) error
+	UnpauseContainer(name string) error
+	RenameContainer(oldName string, newName string) error
+	ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error)
+	BuildImage(image *BuildImage) (io.ReadCloser, error)
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock.go
new file mode 100644
index 00000000..ff982663
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock.go
@@ -0,0 +1,162 @@
+package mockclient
+
+import (
+	"io"
+
+	"github.com/samalba/dockerclient"
+	"github.com/stretchr/testify/mock"
+)
+
+type MockClient struct {
+	mock.Mock
+}
+
+func NewMockClient() *MockClient {
+	return &MockClient{}
+}
+
+func (client *MockClient) Info() (*dockerclient.Info, error) {
+	args := client.Mock.Called()
+	return args.Get(0).(*dockerclient.Info), args.Error(1)
+}
+
+func (client *MockClient) ListContainers(all bool, size bool, filters string) ([]dockerclient.Container, error) {
+	args := client.Mock.Called(all, size, filters)
+	return args.Get(0).([]dockerclient.Container), args.Error(1)
+}
+
+func (client *MockClient) InspectContainer(id string) (*dockerclient.ContainerInfo, error) {
+	args := client.Mock.Called(id)
+	return args.Get(0).(*dockerclient.ContainerInfo), args.Error(1)
+}
+
+func (client *MockClient) InspectImage(id string) (*dockerclient.ImageInfo, error) {
+	args := client.Mock.Called(id)
+	return args.Get(0).(*dockerclient.ImageInfo), args.Error(1)
+}
+
+func (client *MockClient) CreateContainer(config *dockerclient.ContainerConfig, name string) (string, error) {
+	args := client.Mock.Called(config, name)
+	return args.String(0), args.Error(1)
+}
+
+func (client *MockClient) ContainerLogs(id string, options *dockerclient.LogOptions) (io.ReadCloser, error) {
+	args := client.Mock.Called(id, options)
+	return args.Get(0).(io.ReadCloser), args.Error(1)
+}
+
+func (client *MockClient) ContainerChanges(id string) ([]*dockerclient.ContainerChanges, error) {
+	args := client.Mock.Called(id)
+	return args.Get(0).([]*dockerclient.ContainerChanges), args.Error(1)
+}
+
+func (client *MockClient) StartContainer(id string, config *dockerclient.HostConfig) error {
+	args := client.Mock.Called(id, config)
+	return args.Error(0)
+}
+
+func (client *MockClient) StopContainer(id string, timeout int) error {
+	args := client.Mock.Called(id, timeout)
+	return args.Error(0)
+}
+
+func (client *MockClient) RestartContainer(id string, timeout int) error {
+	args := client.Mock.Called(id, timeout)
+	return args.Error(0)
+}
+
+func (client *MockClient) KillContainer(id, signal string) error {
+	args := client.Mock.Called(id, signal)
+	return args.Error(0)
+}
+
+func (client *MockClient) Wait(id string) <-chan dockerclient.WaitResult {
+	args := client.Mock.Called(id)
+	return args.Get(0).(<-chan dockerclient.WaitResult)
+}
+
+func (client *MockClient) MonitorEvents(options *dockerclient.MonitorEventsOptions, stopChan <-chan struct{}) (<-chan dockerclient.EventOrError, error) {
+	args := client.Mock.Called(options, stopChan)
+	return args.Get(0).(<-chan dockerclient.EventOrError), args.Error(1)
+}
+
+func (client *MockClient) StartMonitorEvents(cb dockerclient.Callback, ec chan error, args ...interface{}) {
+	client.Mock.Called(cb, ec, args)
+}
+
+func (client *MockClient) StopAllMonitorEvents() {
+	client.Mock.Called()
+}
+
+func (client *MockClient) TagImage(nameOrID string, repo string, tag string, force bool) error {
+	args := client.Mock.Called(nameOrID, repo, tag, force)
+	return args.Error(0)
+}
+
+func (client *MockClient) StartMonitorStats(id string, cb dockerclient.StatCallback, ec chan error, args ...interface{}) {
+	client.Mock.Called(id, cb, ec, args)
+}
+
+func (client *MockClient) StopAllMonitorStats() {
+	client.Mock.Called()
+}
+
+func (client *MockClient) Version() (*dockerclient.Version, error) {
+	args := client.Mock.Called()
+	return args.Get(0).(*dockerclient.Version), args.Error(1)
+}
+
+func (client *MockClient) PullImage(name string, auth *dockerclient.AuthConfig) error {
+	args := client.Mock.Called(name, auth)
+	return args.Error(0)
+}
+
+func (client *MockClient) LoadImage(reader io.Reader) error {
+	args := client.Mock.Called(reader)
+	return args.Error(0)
+}
+
+func (client *MockClient) RemoveContainer(id string, force, volumes bool) error {
+	args := client.Mock.Called(id, force, volumes)
+	return args.Error(0)
+}
+
+func (client *MockClient) ListImages(all bool) ([]*dockerclient.Image, error) {
+	args := client.Mock.Called(all)
+	return args.Get(0).([]*dockerclient.Image), args.Error(1)
+}
+
+func (client *MockClient) RemoveImage(name string) ([]*dockerclient.ImageDelete, error) {
+	args := client.Mock.Called(name)
+	return args.Get(0).([]*dockerclient.ImageDelete), args.Error(1)
+}
+
+func (client *MockClient) PauseContainer(name string) error {
+	args := client.Mock.Called(name)
+	return args.Error(0)
+}
+
+func (client *MockClient) UnpauseContainer(name string) error {
+	args := client.Mock.Called(name)
+	return args.Error(0)
+}
+
+func (client *MockClient) Exec(config *dockerclient.ExecConfig) (string, error) {
+	args := client.Mock.Called(config)
+	return args.String(0), args.Error(1)
+}
+
+func (client *MockClient) RenameContainer(oldName string, newName string) error {
+	args := client.Mock.Called(oldName, newName)
+	return args.Error(0)
+}
+
+func (client *MockClient) ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error) {
+	args := client.Mock.Called(source, repository, tag, tar)
+	return args.Get(0).(io.ReadCloser), args.Error(1)
+}
+
+func (client *MockClient) BuildImage(image *dockerclient.BuildImage) (io.ReadCloser, error) {
+	args := client.Mock.Called(image)
+	return args.Get(0).(io.ReadCloser), args.Error(1)
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock_test.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock_test.go
new file mode 100644
index 00000000..8d91bcf6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/mockclient/mock_test.go
@@ -0,0 +1,32 @@
+package mockclient
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/samalba/dockerclient"
+)
+
+func TestMock(t *testing.T) {
+	mock := NewMockClient()
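+	// Declare the expectation up front: exactly one Version() call is
+	// allowed, returning the canned value below with a nil error.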
+	mock.On("Version").Return(&dockerclient.Version{Version: "foo"}, nil).Once()
+
+	v, err := mock.Version()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if v.Version != "foo" {
+		t.Fatal(v)
+	}
+
+	mock.Mock.AssertExpectations(t)
+}
+
+func TestMockInterface(t *testing.T) {
+	iface := reflect.TypeOf((*dockerclient.Client)(nil)).Elem()
+	mock := NewMockClient()
+
+	if !reflect.TypeOf(mock).Implements(iface) {
+		t.Fatalf("Mock does not implement the Client interface")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/types.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/types.go
new file mode 100644
index 00000000..15e1e8bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/types.go
@@ -0,0 +1,444 @@
+package dockerclient
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/docker/docker/pkg/units"
+)
+
+type ContainerConfig struct {
+	Hostname string
+	Domainname string
+	User string
+	AttachStdin bool
+	AttachStdout bool
+	AttachStderr bool
+	ExposedPorts map[string]struct{}
+	Tty bool
+	OpenStdin bool
+	StdinOnce bool
+	Env []string
+	Cmd []string
+	Image string
+	Volumes map[string]struct{}
+	VolumeDriver string
+	WorkingDir string
+	Entrypoint []string
+	NetworkDisabled bool
+	MacAddress string
+	OnBuild []string
+	Labels map[string]string
+
+	// FIXME: The following fields have been removed since API v1.18
+	Memory int64
+	MemorySwap int64
+	CpuShares int64
+	Cpuset string
+	PortSpecs []string
+
+	// This is used only by the create command
+	HostConfig HostConfig
+}
+
+type HostConfig struct {
+	Binds []string
+	ContainerIDFile string
+	LxcConf []map[string]string
+	Memory int64
+	MemorySwap int64
+	CpuShares int64
+	CpuPeriod int64
+	CpusetCpus string
+	CpusetMems string
+	CpuQuota int64
+	BlkioWeight int64
+	OomKillDisable bool
+	Privileged bool
+	PortBindings map[string][]PortBinding
+	Links []string
+	PublishAllPorts bool
+	Dns []string
+	DnsSearch []string
+	ExtraHosts []string
+	VolumesFrom []string
+	Devices []DeviceMapping
+	NetworkMode string
+	IpcMode string
+	PidMode string
+	UTSMode string
+	CapAdd []string
+	CapDrop []string
+	RestartPolicy RestartPolicy
+	SecurityOpt []string
+	ReadonlyRootfs bool
+	Ulimits []Ulimit
+	LogConfig LogConfig
+	CgroupParent string
+}
+
+type DeviceMapping struct {
+	PathOnHost string `json:"PathOnHost"`
+	PathInContainer string `json:"PathInContainer"`
+	CgroupPermissions string `json:"CgroupPermissions"`
+}
+
+type ExecConfig struct {
+	AttachStdin bool
+	AttachStdout bool
+	AttachStderr bool
+	Tty bool
+	Cmd []string
+	Container string
+	Detach bool
+}
+
+type LogOptions struct {
+	Follow bool
+	Stdout bool
+	Stderr bool
+	Timestamps bool
+	Tail int64
+}
+
+type MonitorEventsFilters struct {
+	Event string `json:",omitempty"`
+	Image string `json:",omitempty"`
+	Container string `json:",omitempty"`
+}
+
+type MonitorEventsOptions struct {
+	Since int
+	Until int
+	Filters *MonitorEventsFilters `json:",omitempty"`
+}
+
+type RestartPolicy struct {
+	Name string
+	MaximumRetryCount int64
+}
+
+type PortBinding struct {
+	HostIp string
+	HostPort string
+}
+
+type State struct {
+	Running bool
+	Paused bool
+	Restarting bool
+	OOMKilled bool
+	Dead bool
+	Pid int
+	ExitCode int
+	Error string // contains last known error when starting the container
+	StartedAt time.Time
+	FinishedAt time.Time
+	Ghost bool
+}
+
+// String returns a human-readable description of the state
+// Stolen from docker/docker/daemon/state.go
+func (s *State) String() string {
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// StateString returns a single string to describe state
+// Stolen from docker/docker/daemon/state.go
+func (s *State) StateString() string {
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	return "exited"
+}
+
+type ImageInfo struct {
+	Architecture string
+	Author string
+	Comment string
+	Config *ContainerConfig
+	Container string
+	ContainerConfig *ContainerConfig
+	Created time.Time
+	DockerVersion string
+	Id string
+	Os string
+	Parent string
+	Size int64
+	VirtualSize int64
+}
+
+type ContainerInfo struct {
+	Id string
+	Created string
+	Path string
+	Name string
+	Args []string
+	ExecIDs []string
+	Config *ContainerConfig
+	State *State
+	Image string
+	NetworkSettings struct {
+		IPAddress string `json:"IpAddress"`
+		IPPrefixLen int `json:"IpPrefixLen"`
+		Gateway string
+		Bridge string
+		Ports map[string][]PortBinding
+	}
+	SysInitPath string
+	ResolvConfPath string
+	Volumes map[string]string
+	HostConfig *HostConfig
+}
+
+type ContainerChanges struct {
+	Path string
+	Kind int
+}
+
+type Port struct {
+	IP string
+	PrivatePort int
+	PublicPort int
+	Type string
+}
+
+type Container struct {
+	Id string
+	Names []string
+	Image string
+	Command string
+	Created int64
+	Status string
+	Ports []Port
+	SizeRw int64
+	SizeRootFs int64
+	Labels map[string]string
+}
+
+type Event struct {
+	Id string
+	Status string
+	From string
+	Time int64
+}
+
+type Version struct {
+	ApiVersion string
+	Arch string
+	GitCommit string
+	GoVersion string
+	KernelVersion string
+	Os string
+	Version string
+}
+
+type RespContainersCreate struct {
+	Id string
+	Warnings []string
+}
+
+type Image struct {
+	Created int64
+	Id string
+	ParentId string
+	RepoTags []string
+	Size int64
+	VirtualSize int64
+}
+
+// Info is the struct returned by /info
+// The API is currently in flux, so Debug, MemoryLimit, SwapLimit, and
+// IPv4Forwarding are interfaces because in docker 1.6.1 they are 0 or 1 but in
+// master they are bools.
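+// A hedged sketch of reading one such field (hypothetical helper, not part
+// of this package); note that encoding/json decodes bare JSON numbers into
+// float64:
+//
+//	func debugEnabled(info *Info) bool {
+//		switch v := info.Debug.(type) {
+//		case bool:
+//			return v
+//		case float64:
+//			return v != 0
+//		}
+//		return false
+//	}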
+type Info struct {
+	ID string
+	Containers int64
+	Driver string
+	DriverStatus [][]string
+	ExecutionDriver string
+	Images int64
+	KernelVersion string
+	OperatingSystem string
+	NCPU int64
+	MemTotal int64
+	Name string
+	Labels []string
+	Debug interface{}
+	NFd int64
+	NGoroutines int64
+	SystemTime string
+	NEventsListener int64
+	InitPath string
+	InitSha1 string
+	IndexServerAddress string
+	MemoryLimit interface{}
+	SwapLimit interface{}
+	IPv4Forwarding interface{}
+	BridgeNfIptables bool
+	BridgeNfIp6tables bool
+	DockerRootDir string
+	HttpProxy string
+	HttpsProxy string
+	NoProxy string
+}
+
+type ImageDelete struct {
+	Deleted string
+	Untagged string
+}
+
+type EventOrError struct {
+	Event
+	Error error
+}
+
+type WaitResult struct {
+	ExitCode int
+	Error error
+}
+
+type decodingResult struct {
+	result interface{}
+	err error
+}
+
+// The following are types for the API stats endpoint
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods"`
+	// Number of periods when the container hit its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time"`
+}
+
+type CpuUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds.
+	TotalUsage uint64 `json:"total_usage"`
+	// Total CPU time consumed per core.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage"`
+	// Time spent by tasks of the cgroup in kernel mode.
+	// Units: nanoseconds.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+	// Time spent by tasks of the cgroup in user mode.
+	// Units: nanoseconds.
+	UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+	CpuUsage CpuUsage `json:"cpu_usage"`
+	SystemUsage uint64 `json:"system_cpu_usage"`
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type NetworkStats struct {
+	RxBytes uint64 `json:"rx_bytes"`
+	RxPackets uint64 `json:"rx_packets"`
+	RxErrors uint64 `json:"rx_errors"`
+	RxDropped uint64 `json:"rx_dropped"`
+	TxBytes uint64 `json:"tx_bytes"`
+	TxPackets uint64 `json:"tx_packets"`
+	TxErrors uint64 `json:"tx_errors"`
+	TxDropped uint64 `json:"tx_dropped"`
+}
+
+type MemoryStats struct {
+	Usage uint64 `json:"usage"`
+	MaxUsage uint64 `json:"max_usage"`
+	Stats map[string]uint64 `json:"stats"`
+	Failcnt uint64 `json:"failcnt"`
+	Limit uint64 `json:"limit"`
+}
+
+type BlkioStatEntry struct {
+	Major uint64 `json:"major"`
+	Minor uint64 `json:"minor"`
+	Op string `json:"op"`
+	Value uint64 `json:"value"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+	IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+	IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+	IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+	IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+	IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+	IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+	SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+type Stats struct {
+	Read time.Time `json:"read"`
+	NetworkStats NetworkStats `json:"network,omitempty"`
+	CpuStats CpuStats `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+}
+
+type Ulimit struct {
+	Name string `json:"name"`
+	Soft uint64 `json:"soft"`
+	Hard uint64 `json:"hard"`
+}
+
+type LogConfig struct {
+	Type string `json:"type"`
+	Config map[string]string `json:"config"`
+}
+
+type BuildImage struct {
+	Config *ConfigFile
+	DockerfileName string
+	Context io.Reader
+	RemoteURL string
+	RepoName string
+	SuppressOutput bool
+	NoCache bool
+	Remove bool
+	ForceRemove bool
+	Pull bool
+	Memory int64
+	MemorySwap int64
+	CpuShares int64
+	CpuPeriod int64
+	CpuQuota int64
+	CpuSetCpus string
+	CpuSetMems string
+	CgroupParent string
+}
diff --git a/Godeps/_workspace/src/github.com/samalba/dockerclient/utils.go b/Godeps/_workspace/src/github.com/samalba/dockerclient/utils.go
new file mode 100644
index 00000000..806f1b3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samalba/dockerclient/utils.go
@@ -0,0 +1,33 @@
+package dockerclient
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+func newHTTPClient(u *url.URL, tlsConfig *tls.Config, timeout time.Duration) *http.Client {
+	httpTransport := &http.Transport{
+		TLSClientConfig: tlsConfig,
+	}
+
+	switch u.Scheme {
+	default:
+		httpTransport.Dial = func(proto, addr string) (net.Conn, error) {
+			return net.DialTimeout(proto, addr, timeout)
+		}
+	case "unix":
+		socketPath := u.Path
+		unixDial := func(proto, addr string) (net.Conn, error) {
+			return net.DialTimeout("unix", socketPath, timeout)
+		}
+		httpTransport.Dial = unixDial
+		// Override the main URL object so the HTTP lib won't complain
+		u.Scheme = "http"
+		u.Host = "unix.sock"
+		u.Path = ""
+	}
+	return &http.Client{Transport: httpTransport}
+}
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 00000000..fbf03f4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,895 @@
+package assert
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+// Comparison is a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+	Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+
+	if expected == nil || actual == nil {
+		return expected == actual
+	}
+
+	if reflect.DeepEqual(expected, actual) {
+		return true
+	}
+
+	return false
+
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
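+// For example, ObjectsAreEqualValues(uint32(10), int32(10)) is true even
+// though ObjectsAreEqual(uint32(10), int32(10)) is false, since the values
+// compare equal after conversion.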
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+	if ObjectsAreEqual(expected, actual) {
+		return true
+	}
+
+	actualType := reflect.TypeOf(actual)
+	expectedValue := reflect.ValueOf(expected)
+	if expectedValue.Type().ConvertibleTo(actualType) {
+		// Attempt comparison after type conversion
+		if reflect.DeepEqual(actual, expectedValue.Convert(actualType).Interface()) {
+			return true
+		}
+	}
+
+	return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+	pc := uintptr(0)
+	file := ""
+	line := 0
+	ok := false
+	name := ""
+
+	callers := []string{}
+	for i := 0; ; i++ {
+		pc, file, line, ok = runtime.Caller(i)
+		if !ok {
+			return nil
+		}
+
+		// This is a huge edge case, but it will panic if this is the case, see #180
+		if file == "" {
+			break
+		}
+
+		parts := strings.Split(file, "/")
+		dir := parts[len(parts)-2]
+		file = parts[len(parts)-1]
+		if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+			callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+		}
+
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			break
+		}
+		name = f.Name()
+		// Drop the package
+		segments := strings.Split(name, ".")
+		name = segments[len(segments)-1]
+		if isTest(name, "Test") ||
+			isTest(name, "Benchmark") ||
+			isTest(name, "Example") {
+			break
+		}
+	}
+
+	return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+	return !unicode.IsLower(rune)
+}
+
+// getWhitespaceString returns a string that is long enough to overwrite the default
+// output from the go testing framework.
+func getWhitespaceString() string {
+
+	_, file, line, ok := runtime.Caller(1)
+	if !ok {
+		return ""
+	}
+	parts := strings.Split(file, "/")
+	file = parts[len(parts)-1]
+
+	return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 || msgAndArgs == nil {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		return msgAndArgs[0].(string)
+	}
+	if len(msgAndArgs) > 1 {
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+	}
+	return ""
+}
+
+// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's
+// test printing (see inner comment for specifics)
+func indentMessageLines(message string, tabs int) string {
+	outBuf := new(bytes.Buffer)
+
+	for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+		if i != 0 {
+			outBuf.WriteRune('\n')
+		}
+		for ii := 0; ii < tabs; ii++ {
+			outBuf.WriteRune('\t')
+			// Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter
+			// by 1 prematurely.
+			if ii == 0 && i > 0 {
+				ii++
+			}
+		}
+		outBuf.WriteString(scanner.Text())
+	}
+
+	return outBuf.String()
+}
+
+// Fail reports a failure through the provided TestingT.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+
+	errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t")
+	if len(message) > 0 {
+		t.Errorf("\r%s\r\tError Trace:\t%s\n"+
+			"\r\tError:%s\n"+
+			"\r\tMessages:\t%s\n\r",
+			getWhitespaceString(),
+			errorTrace,
+			indentMessageLines(failureMessage, 2),
+			message)
+	} else {
+		t.Errorf("\r%s\r\tError Trace:\t%s\n"+
+			"\r\tError:%s\n\r",
+			getWhitespaceString(),
+			errorTrace,
+			indentMessageLines(failureMessage, 2))
+	}
+
+	return false
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+	if !reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123, "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqual(expected, actual) {
+		return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
+			" != %#v (actual)", expected, actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
+			" != %#v (actual)", expected, actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType)
+	}
+
+	return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err, "err should be something")
+//
+// Returns whether the assertion was successful (true) or not (false).
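+// Note that the reflect-based check below also treats a typed nil (for
+// example a nil *bytes.Buffer stored in an interface{}) as nil, so NotNil
+// fails for it as well.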
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	success := true
+
+	if object == nil {
+		success = false
+	} else {
+		value := reflect.ValueOf(object)
+		kind := value.Kind()
+		if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+			success = false
+		}
+	}
+
+	if !success {
+		Fail(t, "Expected value not to be nil.", msgAndArgs...)
+	}
+
+	return success
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+	if object == nil {
+		return true
+	}
+
+	value := reflect.ValueOf(object)
+	kind := value.Kind()
+	if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+		return true
+	}
+
+	return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err, "err should be nothing")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if isNil(object) {
+		return true
+	}
+	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+var zeros = []interface{}{
+	int(0),
+	int8(0),
+	int16(0),
+	int32(0),
+	int64(0),
+	uint(0),
+	uint8(0),
+	uint16(0),
+	uint32(0),
+	uint64(0),
+	float32(0),
+	float64(0),
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+	if object == nil {
+		return true
+	} else if object == "" {
+		return true
+	} else if object == false {
+		return true
+	}
+
+	for _, v := range zeros {
+		if object == v {
+			return true
+		}
+	}
+
+	objValue := reflect.ValueOf(object)
+
+	switch objValue.Kind() {
+	case reflect.Map:
+		fallthrough
+	case reflect.Slice, reflect.Chan:
+		{
+			return (objValue.Len() == 0)
+		}
+	case reflect.Ptr:
+		{
+			switch object.(type) {
+			case *time.Time:
+				return object.(*time.Time).IsZero()
+			default:
+				return false
+			}
+		}
+	}
+	return false
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	pass := isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+//   assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+	pass := !isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+	v := reflect.ValueOf(x)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+	return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(t, mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
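+// Len relies on reflect.Value.Len, so it applies to arrays, chans, maps,
+// slices and strings; any other type makes getLen recover from the panic
+// and the assertion fail with the "could not be applied builtin len()"
+// message.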
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+	ok, l := getLen(object)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != true {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != false {
+		return Fail(t, "Should be false", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, "Should not be equal", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement loops over the list to check whether it includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+	listValue := reflect.ValueOf(list)
+	elementValue := reflect.ValueOf(element)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+			found = false
+		}
+	}()
+
+	if reflect.TypeOf(list).Kind() == reflect.String {
+		return true, strings.Contains(listValue.String(), elementValue.String())
+	}
+
+	for i := 0; i < listValue.Len(); i++ {
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+			return true, true
+		}
+	}
+	return true, false
+
+}
+
+// Contains asserts that the specified string or list(array, slice...) contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
+// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if !found {
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotContains asserts that the specified string or list(array, slice...) does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
+//
+// Returns whether the assertion was successful (true) or not (false).
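+// As with Contains, strings, arrays and slices are supported; maps are not,
+// because includeElement indexes the collection positionally, which panics
+// for maps and is recovered and reported as the "could not be applied
+// builtin len()" failure.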
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. 
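+// In other words, it passes when |expected - actual| <= delta; both
+// arguments must be numeric (see toFloat above), and NaN on either side
+// fails the assertion.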
+// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +// min(|expected|, |actual|) * epsilon +func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 { + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + // invalid input + return 0 + } + + if af < 0 { + af = -af + } + if bf < 0 { + bf = -bf + } + var delta float64 + if af < bf { + delta = af * epsilon + } else { + delta = bf * epsilon + } + return delta +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + delta := calcEpsilonDelta(expected, actual, epsilon) + + return InDelta(t, expected, actual, delta, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if isNil(err) { + return true + } + + return Fail(t, fmt.Sprintf("No error is expected but got %v", err), msgAndArgs...) 
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err, "An error was expected") {
+//   assert.Equal(t, err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+	return NotNil(t, err, "An error is expected but got nil. %s", message)
+
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err, "An error was expected") {
+//   assert.Equal(t, err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+	if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
+		return false
+	}
+	s := "An error with value \"%s\" is expected but got \"%s\". %s"
+	return Equal(t, theError.Error(), errString,
+		s, errString, theError.Error(), message)
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+	var r *regexp.Regexp
+	if rr, ok := rx.(*regexp.Regexp); ok {
+		r = rr
+	} else {
+		r = regexp.MustCompile(fmt.Sprint(rx))
+	}
+
+	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+
+	match := matchRegexp(rx, str)
+
+	if !match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	match := matchRegexp(rx, str)
+
+	if match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return !match
+
+}
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go
new file mode 100644
index 00000000..36c671ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go
@@ -0,0 +1,813 @@
+package assert
+
+import (
+	"errors"
+	"io"
+	"math"
+	"regexp"
+	"testing"
+	"time"
+)
+
+// AssertionTesterInterface defines an interface to be used for testing assertion methods
+type AssertionTesterInterface interface {
+	TestMethod()
+}
+
+// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface
+type AssertionTesterConformingObject struct {
+}
+
+func (a *AssertionTesterConformingObject) TestMethod() {
+}
+
+// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface
+type AssertionTesterNonConformingObject struct {
+}
+
+func TestObjectsAreEqual(t *testing.T) {
+
+	if !ObjectsAreEqual("Hello World", "Hello World") {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual(123, 123) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual(123.5, 123.5) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual(nil, nil) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual('x', "x") {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual("x", 'x') {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual(0, 0.1) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual(0.1, 0) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual(uint32(10), int32(10)) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if !ObjectsAreEqualValues(uint32(10), int32(10)) {
+		t.Error("ObjectsAreEqualValues should return true")
+	}
+
+}
+
+func TestImplements(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) {
+		t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface")
+	}
+	if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {
+		t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implement AssertionTesterInterface")
+	}
+
+}
+
+func TestIsType(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
+		t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject")
+	}
+	if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {
+		t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject")
+	}
+
+}
+
+func TestEqual(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !Equal(mockT, "Hello World", "Hello World") {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, 123, 123) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, 123.5, 123.5) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, nil, nil) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, int32(123), int32(123)) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, uint64(123), uint64(123)) {
+		t.Error("Equal should return true")
+	}
+
+}
+
+func TestNotNil(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !NotNil(mockT, new(AssertionTesterConformingObject)) {
+		t.Error("NotNil should return true: object is not nil")
+	}
+	if NotNil(mockT, nil) {
+		t.Error("NotNil should return false: object is nil")
+	}
+
+}
+
+func TestNil(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !Nil(mockT, nil) {
+		t.Error("Nil should return true: object is nil")
+	}
+	if Nil(mockT, new(AssertionTesterConformingObject)) {
+		t.Error("Nil should return false: object is not nil")
+	}
+
+}
+
+func TestTrue(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !True(mockT, true) {
+		t.Error("True should return true")
+	}
+	if True(mockT, false) {
+		t.Error("True should return false")
+	}
+
+}
+
+func TestFalse(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !False(mockT, false) {
+		t.Error("False should return true")
+	}
+	if False(mockT, true) {
+		t.Error("False should return false")
+	}
+
+}
+
+func TestExactly(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	a := float32(1)
+	b := float64(1)
+	c := float32(1)
+	d := float32(2)
+
+	if Exactly(mockT, a, b) {
+		t.Error("Exactly should return false")
+	}
+	if Exactly(mockT, a, d) {
+		t.Error("Exactly should return false")
+	}
+	if !Exactly(mockT, a, c) {
+		t.Error("Exactly should return true")
+	}
+
+	if Exactly(mockT, nil, a) {
+		t.Error("Exactly should return false")
+	}
+	if Exactly(mockT, a, nil) {
+		t.Error("Exactly should return false")
+	}
+
+}
+
+func TestNotEqual(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !NotEqual(mockT, "Hello World", "Hello World!") {
+		t.Error("NotEqual should return true")
+	}
+	if !NotEqual(mockT, 123, 1234) {
+		t.Error("NotEqual should return true")
+	}
+	if !NotEqual(mockT, 123.5, 123.55) {
+		t.Error("NotEqual should return true")
+	}
+	if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) {
+		t.Error("NotEqual should return true")
+	}
+	if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) {
+		t.Error("NotEqual should return true")
+	}
+	funcA := func() int { return 23 }
+	funcB := func() int { return 42 }
+	if !NotEqual(mockT, funcA, funcB) {
+		t.Error("NotEqual should return true")
+	}
+
+	if NotEqual(mockT, "Hello World", "Hello World") {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, 123, 123) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, 123.5, 123.5) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
+		t.Error("NotEqual should return false")
+	}
+}
+
+type A struct {
+	Name, Value string
+}
+
+func TestContains(t *testing.T) {
+
+	mockT := new(testing.T)
+	list := []string{"Foo", "Bar"}
+	complexList := []*A{
+		{"b", "c"},
+		{"d", "e"},
+		{"g", "h"},
+		{"j", "k"},
+	}
+
+	if !Contains(mockT, "Hello World", "Hello") {
+		t.Error("Contains should return true: \"Hello World\" contains \"Hello\"")
+	}
+	if Contains(mockT, "Hello World", "Salut") {
+		t.Error("Contains should return false: \"Hello World\" does not
contain \"Salut\"") + } + + if !Contains(mockT, list, "Bar") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") + } + if Contains(mockT, list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + if !Contains(mockT, complexList, &A{"g", "h"}) { + t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } +} + +func TestNotContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + + if !NotContains(mockT, "Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if NotContains(mockT, "Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !NotContains(mockT, list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if NotContains(mockT, list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func Test_includeElement(t *testing.T) { + + list1 := []string{"Foo", "Bar"} + list2 := []int{1, 2} + + ok, found := includeElement("Hello World", "World") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Bar") + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 1) + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 2) + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo!") + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, 3) + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, "1") + True(t, ok) + False(t, found) + + ok, found = includeElement(1433, "1") + False(t, ok) + False(t, found) + +} + +func TestCondition(t *testing.T) { + mockT := new(testing.T) + + if !Condition(mockT, func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if Condition(mockT, func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanics(t *testing.T) { + + mockT := new(testing.T) + + if !NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + +} + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), 
"Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) + +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + 
trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } + + cases = []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 4}, + {[...]int{1, 2, 3}, 2}, + {"ABC", 2}, + {map[int]int{1: 2, 2: 4, 3: 6}, 4}, + {ch, 2}, + + {[]int{}, 1}, + {map[int]int{}, 1}, + {make(chan int), 1}, + + {[]int(nil), 1}, + {map[int]int(nil), 1}, + {(chan int)(nil), 1}, + } + + for _, c := range cases { + False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := time.Now() + b := a.Add(10 * time.Second) + + True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDelta(t *testing.T) { + mockT := new(testing.T) + + True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") + False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") + False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail") + False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), 
uint64(1), 1},
+
+		{int(2), int(1), 1},
+		{int8(2), int8(1), 1},
+		{int16(2), int16(1), 1},
+		{int32(2), int32(1), 1},
+		{int64(2), int64(1), 1},
+
+		{float32(2), float32(1), 1},
+		{float64(2), float64(1), 1},
+	}
+
+	for _, tc := range cases {
+		True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%v - %v| <= %v", tc.a, tc.b, tc.delta)
+	}
+}
+
+func TestInDeltaSlice(t *testing.T) {
+	mockT := new(testing.T)
+
+	True(t, InDeltaSlice(mockT,
+		[]float64{1.001, 0.999},
+		[]float64{1, 1},
+		0.1), "{1.001, 0.999} is element-wise close to {1, 1} in delta=0.1")
+
+	True(t, InDeltaSlice(mockT,
+		[]float64{1, 2},
+		[]float64{0, 3},
+		1), "{1, 2} is element-wise close to {0, 3} in delta=1")
+
+	False(t, InDeltaSlice(mockT,
+		[]float64{1, 2},
+		[]float64{0, 3},
+		0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1")
+
+	False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail")
+}
+
+func TestInEpsilon(t *testing.T) {
+	mockT := new(testing.T)
+
+	cases := []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), uint16(2), .001},
+		{2.1, 2.2, 0.1},
+		{2.2, 2.1, 0.1},
+		{-2.1, -2.2, 0.1},
+		{-2.2, -2.1, 0.1},
+		{uint64(100), uint8(101), 0.01},
+		{0.1, -0.1, 2},
+	}
+
+	for _, tc := range cases {
+		True(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+
+	cases = []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), int16(-2), .001},
+		{uint64(100), uint8(102), 0.01},
+		{2.1, 2.2, 0.001},
+		{2.2, 2.1, 0.001},
+		{2.1, -2.2, 1},
+		{2.1, "bla-bla", 0},
+		{0.1, -0.1, 1.99},
+	}
+
+	for _, tc := range cases {
+		False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+
+}
+
+func TestInEpsilonSlice(t *testing.T) {
+	mockT := new(testing.T)
+
+	True(t, InEpsilonSlice(mockT,
+		[]float64{2.2, 2.0},
+		[]float64{2.1, 2.1},
+		0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in epsilon=0.06")
+
+	False(t, InEpsilonSlice(mockT,
+		[]float64{2.2, 2.0},
+		[]float64{2.1, 2.1},
+		0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in epsilon=0.04")
+
+	False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail")
+}
+
+func TestRegexp(t *testing.T) {
+	mockT := new(testing.T)
+
+	cases := []struct {
+		rx, str string
+	}{
+		{"^start", "start of the line"},
+		{"end$", "in the end"},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"},
+	}
+
+	for _, tc := range cases {
+		True(t, Regexp(mockT, tc.rx, tc.str))
+		True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+		False(t, NotRegexp(mockT, tc.rx, tc.str))
+		False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+	}
+
+	cases = []struct {
+		rx, str string
+	}{
+		{"^asdfastart", "Not the start of the line"},
+		{"end$", "in the end."},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"},
+	}
+
+	for _, tc := range cases {
+		False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str)
+		False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+		True(t, NotRegexp(mockT, tc.rx, tc.str))
+		True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+	}
+}
+
+func testAutogeneratedFunction() {
+	defer func() {
+		if err := recover(); err == nil {
+			panic("did not panic")
+		}
+		CallerInfo()
+	}()
+	t := struct {
+		io.Closer
+	}{}
+	var c io.Closer
+	c = t
+	c.Close()
+}
+
+func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) {
+	NotPanics(t, func() {
+		testAutogeneratedFunction()
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 00000000..f6781062
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,154 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+//
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(t, a, b, "The two words should be the same.")
+//
+//    }
+//
+// If you assert many times, use the form below:
+//
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//      assert := assert.New(t)
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(a, b, "The two words should be the same.")
+//    }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+//
+// Here is an overview of the assert functions:
+//
+//    assert.Equal(t, expected, actual [, message [, format-args]])
+//
+//    assert.EqualValues(t, expected, actual [, message [, format-args]])
+//
+//    assert.NotEqual(t, notExpected, actual [, message [, format-args]])
+//
+//    assert.True(t, actualBool [, message [, format-args]])
+//
+//    assert.False(t, actualBool [, message [, format-args]])
+//
+//    assert.Nil(t, actualObject [, message [, format-args]])
+//
+//    assert.NotNil(t, actualObject [, message [, format-args]])
+//
+//    assert.Empty(t, actualObject [, message [, format-args]])
+//
+//    assert.NotEmpty(t, actualObject [, message [, format-args]])
+//
+//    assert.Len(t, actualObject, expectedLength [, message [, format-args]])
+//
+//    assert.Error(t, errorObject [, message [, format-args]])
+//
+//    assert.NoError(t, errorObject [, message [, format-args]])
+//
+//    assert.EqualError(t, theError, errString [, message [, format-args]])
+//
+//    assert.Implements(t, (*MyInterface)(nil), new(MyObject) [, message [, format-args]])
+//
+//    assert.IsType(t, expectedObject, actualObject [, message [, format-args]])
+//
+//    assert.Contains(t, stringOrSlice, substringOrElement [, message [, format-args]])
+//
+//    assert.NotContains(t, stringOrSlice, substringOrElement [, message [, format-args]])
+//
+//    assert.Panics(t, func(){
+//
+//      // call code that should panic
+//
+//    } [, message [, format-args]])
+//
+//    assert.NotPanics(t, func(){
+//
+//      // call code that should not panic
+//
+//    } [, message [, format-args]])
+//
+//    assert.WithinDuration(t, timeA, timeB, deltaTime [, message [, format-args]])
+//
+//    assert.InDelta(t, numA, numB, delta [, message [, format-args]])
+//
+//    assert.InEpsilon(t, numA, numB, epsilon [, message [, format-args]])
+//
+// The assert package also contains an Assertions object: create one with New(t)
+// and the same assertion methods are available without the TestingT argument.
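// (Editor's note: a short hedged sketch contrasting the two styles the package
// comment above describes; both checks below are equivalent.)
//
//	func TestGreeting(t *testing.T) {
//		greeting := "Hello"
//
//		// Package-level style: every call takes the *testing.T.
//		assert.Equal(t, "Hello", greeting)
//
//		// Assertions style: t is captured once by New.
//		a := assert.New(t)
//		a.Equal("Hello", greeting)
//	}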
+//
+// Here is an overview of the assert functions:
+//
+//    assert.Equal(expected, actual [, message [, format-args]])
+//
+//    assert.EqualValues(expected, actual [, message [, format-args]])
+//
+//    assert.NotEqual(notExpected, actual [, message [, format-args]])
+//
+//    assert.True(actualBool [, message [, format-args]])
+//
+//    assert.False(actualBool [, message [, format-args]])
+//
+//    assert.Nil(actualObject [, message [, format-args]])
+//
+//    assert.NotNil(actualObject [, message [, format-args]])
+//
+//    assert.Empty(actualObject [, message [, format-args]])
+//
+//    assert.NotEmpty(actualObject [, message [, format-args]])
+//
+//    assert.Len(actualObject, expectedLength [, message [, format-args]])
+//
+//    assert.Error(errorObject [, message [, format-args]])
+//
+//    assert.NoError(errorObject [, message [, format-args]])
+//
+//    assert.EqualError(theError, errString [, message [, format-args]])
+//
+//    assert.Implements((*MyInterface)(nil), new(MyObject) [, message [, format-args]])
+//
+//    assert.IsType(expectedObject, actualObject [, message [, format-args]])
+//
+//    assert.Contains(stringOrSlice, substringOrElement [, message [, format-args]])
+//
+//    assert.NotContains(stringOrSlice, substringOrElement [, message [, format-args]])
+//
+//    assert.Panics(func(){
+//
+//      // call code that should panic
+//
+//    } [, message [, format-args]])
+//
+//    assert.NotPanics(func(){
+//
+//      // call code that should not panic
+//
+//    } [, message [, format-args]])
+//
+//    assert.WithinDuration(timeA, timeB, deltaTime [, message [, format-args]])
+//
+//    assert.InDelta(numA, numB, delta [, message [, format-args]])
+//
+//    assert.InEpsilon(numA, numB, epsilon [, message [, format-args]])
+package assert
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 00000000..ac9dc9d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+	"errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 00000000..d8d3f531
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,265 @@
+package assert
+
+import "time"
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+	t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+// Fail reports a failure through the wrapped TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+	return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+//    assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
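// (Editor's note: a hedged sketch of where AnError, defined above, is handy;
// `fetch` is a hypothetical closure whose exact error value is irrelevant.)
//
//	func TestFetchPropagatesError(t *testing.T) {
//		fetch := func() error { return assert.AnError }
//		err := fetch()
//		// The test cares only that *some* error came back.
//		assert.Error(t, err)
//		assert.Equal(t, assert.AnError, err)
//	}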
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// Equal asserts that two objects are equal.
+//
+//    assert.Equal(123, 123, "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    assert.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+//    assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+//    assert.NotNil(err, "err should be something")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+	return NotNil(a.t, object, msgAndArgs...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+//    assert.Nil(err, "err should be nothing")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+	return Nil(a.t, object, msgAndArgs...)
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a
+// slice with len == 0.
+//
+//    assert.Empty(obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+	return Empty(a.t, object, msgAndArgs...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a
+// slice with len == 0.
+//
+//    if assert.NotEmpty(obj) {
+//      assert.Equal("two", obj[1])
+//    }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+	return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+//    assert.Len(mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+	return Len(a.t, object, length, msgAndArgs...)
+}
+
+// True asserts that the specified value is true.
+//
+//    assert.True(myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+	return True(a.t, value, msgAndArgs...)
+}
+
+// False asserts that the specified value is false.
+//
+//    assert.False(myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+	return False(a.t, value, msgAndArgs...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+//    assert.NotEqual(obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// Contains asserts that the specified string contains the specified substring.
+//
+//    assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool {
+	return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContains asserts that the specified string does NOT contain the specified substring.
+//
+//    assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool {
+	return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+	return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+//    assert.Panics(func(){
+//      GoCrazy()
+//    }, "Calling GoCrazy() should panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return Panics(a.t, f, msgAndArgs...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//    assert.NotPanics(func(){
+//      RemainCalm()
+//    }, "Calling RemainCalm() should NOT panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+//    assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+	return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+//    assert.InDelta(math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon.
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoError(err) {
+//	   assert.Equal(actualObj, expectedObj)
+//   }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool {
+	return NoError(a.t, theError, msgAndArgs...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Error(err, "An error was expected") {
+//	   assert.Equal(err, expectedError)
+//   }
+//
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool {
+	return Error(a.t, theError, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Error(err, "An error was expected") {
+//	   assert.Equal(err, expectedError)
+//   }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+	return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//   assert.Regexp(regexp.MustCompile("start"), "it's starting")
+//   assert.Regexp("start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//   assert.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//   assert.NotRegexp("^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
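// (Editor's note: a hedged sketch of the wrapper style for the two regexp
// assertions above; rx may be a *regexp.Regexp or a plain pattern string.)
//
//	func TestBanner(t *testing.T) {
//		a := assert.New(t)
//		a.Regexp("^RancherOS", "RancherOS v0.4")
//		a.NotRegexp(regexp.MustCompile(`\s$`), "no trailing space")
//	}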
+} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go new file mode 100644 index 00000000..3df3f391 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go @@ -0,0 +1,511 @@ +package assert + +import ( + "errors" + "regexp" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } +} + +func TestIsTypeWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqualWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Equal("Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !assert.Equal(123, 123) { + t.Error("Equal should return true") + } + if !assert.Equal(123.5, 123.5) { + t.Error("Equal should return true") + } + if !assert.Equal([]byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !assert.Equal(nil, nil) { + t.Error("Equal should return true") + } +} + +func TestEqualValuesWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.EqualValues(uint32(10), int32(10)) { + t.Error("EqualValues should return true") + } +} + +func TestNotNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.NotNil(new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if assert.NotNil(nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Nil(nil) { + t.Error("Nil should return true: object is nil") + } + if assert.Nil(new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrueWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.True(true) { + t.Error("True should return true") + } + if assert.True(false) { + t.Error("True should return false") + } + +} + +func TestFalseWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.False(false) { + t.Error("False should return true") + } + if assert.False(true) { + t.Error("False should return false") + } + +} + +func TestExactlyWrapper(t *testing.T) { + assert := New(new(testing.T)) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if assert.Exactly(a, b) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return 
true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !assert.Contains(list, "Foo") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if assert.Contains(list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !assert.NotContains(list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if assert.NotContains(list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func TestConditionWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Condition(func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if assert.Condition(func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return 
False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some error"), + "EqualError should return false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil does not have length") + assert.False(mockAssert.Len(0, 0), "int does not have length") + assert.False(mockAssert.Len(true, 0), "true does not have length") + assert.False(mockAssert.Len(false, 0), "false does not have length") + assert.False(mockAssert.Len('A', 0), "Rune does not have length") + assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func 
TestWithinDurationWrapper(t *testing.T) {
+	assert := New(t)
+	mockAssert := New(new(testing.T))
+	a := time.Now()
+	b := a.Add(10 * time.Second)
+
+	assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference")
+	assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference")
+
+	assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference")
+	assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference")
+
+	assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference")
+	assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference")
+
+	assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference")
+	assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference")
+}
+
+func TestInDeltaWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01")
+	True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01")
+	True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1")
+	False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail")
+	False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail")
+	False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail")
+
+	cases := []struct {
+		a, b  interface{}
+		delta float64
+	}{
+		{uint8(2), uint8(1), 1},
+		{uint16(2), uint16(1), 1},
+		{uint32(2), uint32(1), 1},
+		{uint64(2), uint64(1), 1},
+
+		{int(2), int(1), 1},
+		{int8(2), int8(1), 1},
+		{int16(2), int16(1), 1},
+		{int32(2), int32(1), 1},
+		{int64(2), int64(1), 1},
+
+		{float32(2), float32(1), 1},
+		{float64(2), float64(1), 1},
+	}
+
+	for _, tc := range cases {
+		True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%v - %v| <= %v", tc.a, tc.b, tc.delta)
+	}
+}
+
+func TestInEpsilonWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	cases := []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), uint16(2), .001},
+		{2.1, 2.2, 0.1},
+		{2.2, 2.1, 0.1},
+		{-2.1, -2.2, 0.1},
+		{-2.2, -2.1, 0.1},
+		{uint64(100), uint8(101), 0.01},
+		{0.1, -0.1, 2},
+	}
+
+	for _, tc := range cases {
+		True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+
+	cases = []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), int16(-2), .001},
+		{uint64(100), uint8(102), 0.01},
+		{2.1, 2.2, 0.001},
+		{2.2, 2.1, 0.001},
+		{2.1, -2.2, 1},
+		{2.1, "bla-bla", 0},
+		{0.1, -0.1, 1.99},
+	}
+
+	for _, tc := range cases {
+		False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+}
+
+func TestRegexpWrapper(t *testing.T) {
+
+	assert := New(new(testing.T))
+
+	cases := []struct {
+		rx, str string
+	}{
+		{"^start", "start of the line"},
+		{"end$", "in the end"},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"},
+	}
+
+	for _, tc := range cases {
+		True(t, assert.Regexp(tc.rx, tc.str))
+		True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str))
+		False(t, assert.NotRegexp(tc.rx, tc.str))
+		False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str))
+	}
+
+	cases = []struct {
+		rx, str string
+	}{
+		{"^asdfastart", "Not 
the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + True(t, assert.NotRegexp(tc.rx, tc.str)) + True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 00000000..1246e58e --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,157 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 +// if building a new request fails. +func httpCode(handler http.HandlerFunc, mode, url string, values url.Values) int { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + if err != nil { + return -1 + } + handler(w, req) + return w.Code +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { + code := httpCode(handler, mode, url, values) + if code == -1 { + return false + } + return code >= http.StatusOK && code <= http.StatusPartialContent +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { + code := httpCode(handler, mode, url, values) + if code == -1 { + return false + } + return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { + code := httpCode(handler, mode, url, values) + if code == -1 { + return false + } + return code >= http.StatusBadRequest +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, mode, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
+	body := HTTPBody(handler, mode, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if !contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//    assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
+	body := HTTPBody(handler, mode, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return !contains
+}
+
+//
+// Assertions Wrappers
+//
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+//    assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, mode, url string, values url.Values) bool {
+	return HTTPSuccess(a.t, handler, mode, url, values)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+//    assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, mode, url string, values url.Values) bool {
+	return HTTPRedirect(a.t, handler, mode, url, values)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+//    assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, mode, url string, values url.Values) bool {
+	return HTTPError(a.t, handler, mode, url, values)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+//    assert.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { + return HTTPBodyNotContains(a.t, handler, mode, url, values, str) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go new file mode 100644 index 00000000..684c2d5d --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go @@ -0,0 +1,86 @@ +package assert + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +func httpOK(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func httpRedirect(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTemporaryRedirect) +} + +func httpError(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) +} + +func TestHTTPStatuses(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true) + assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false) + assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false) + + assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false) + assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true) + assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false) + + assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false) + assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false) + assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true) +} + +func TestHTTPStatusesWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) +} + +func httpHelloName(w http.ResponseWriter, r *http.Request) { + name := r.FormValue("name") + w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) +} + +func TestHttpBody(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) +} + +func TestHttpBodyWrappers(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", 
url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go new file mode 100644 index 00000000..7b38438f --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go @@ -0,0 +1,77 @@ +// Alternative testing tools which stop test execution if test failed. +// +// Example Usage +// +// The following is a complete example using require in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/require" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// require.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// Assertions +// +// The `require` package have same global functions as in the `assert` package, +// but instead of returning a boolean result they call `t.FailNow()`. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +// +// Here is an overview of the assert functions: +// +// require.Equal(t, expected, actual [, message [, format-args]) +// +// require.NotEqual(t, notExpected, actual [, message [, format-args]]) +// +// require.True(t, actualBool [, message [, format-args]]) +// +// require.False(t, actualBool [, message [, format-args]]) +// +// require.Nil(t, actualObject [, message [, format-args]]) +// +// require.NotNil(t, actualObject [, message [, format-args]]) +// +// require.Empty(t, actualObject [, message [, format-args]]) +// +// require.NotEmpty(t, actualObject [, message [, format-args]]) +// +// require.Error(t, errorObject [, message [, format-args]]) +// +// require.NoError(t, errorObject [, message [, format-args]]) +// +// require.EqualError(t, theError, errString [, message [, format-args]]) +// +// require.Implements(t, (*MyInterface)(nil), new(MyObject) [,message [, format-args]]) +// +// require.IsType(t, expectedObject, actualObject [, message [, format-args]]) +// +// require.Contains(t, string, substring [, message [, format-args]]) +// +// require.NotContains(t, string, substring [, message [, format-args]]) +// +// require.Panics(t, func(){ +// +// // call code that should panic +// +// } [, message [, format-args]]) +// +// require.NotPanics(t, func(){ +// +// // call code that should not panic +// +// } [, message [, format-args]]) +// +// require.WithinDuration(t, timeA, timeB, deltaTime, [, message [, format-args]]) +// +// require.InDelta(t, numA, numB, delta, [, message [, format-args]]) +// +// require.InEpsilon(t, numA, numB, epsilon, [, message [, format-args]]) +package require diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements.go 
b/Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements.go new file mode 100644 index 00000000..069d4198 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements.go @@ -0,0 +1,211 @@ +package require + +import ( + "time" + + "github.com/stretchr/testify/assert" +) + +type Assertions struct { + t TestingT +} + +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + FailNow(a.t, failureMessage, msgAndArgs...) +} + +// Implements asserts that an object is implemented by the specified interface. + +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + IsType(a.t, expectedType, object, msgAndArgs...) +} + +// Equal asserts that two objects are equal. +// +// require.Equal(123, 123, "123 and 123 should be equal") +func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) { + Equal(a.t, expected, actual, msgAndArgs...) +} + +// Exactly asserts that two objects are equal is value and type. +// +// require.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) { + Exactly(a.t, expected, actual, msgAndArgs...) +} + +// NotNil asserts that the specified object is not nil. +// +// require.NotNil(err, "err should be something") +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + NotNil(a.t, object, msgAndArgs...) +} + +// Nil asserts that the specified object is nil. +// +// require.Nil(err, "err should be nothing") +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + Nil(a.t, object, msgAndArgs...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a +// slice with len == 0. +// +// require.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + Empty(a.t, object, msgAndArgs...) +} + +// Empty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a +// slice with len == 0. +// +// if require.NotEmpty(obj) { +// require.Equal("two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + NotEmpty(a.t, object, msgAndArgs...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// require.Len(mySlice, 3, "The size of slice is not 3") +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + Len(a.t, object, length, msgAndArgs...) +} + +// True asserts that the specified value is true. +// +// require.True(myBool, "myBool should be true") +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + True(a.t, value, msgAndArgs...) +} + +// False asserts that the specified value is true. +// +// require.False(myBool, "myBool should be false") +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + False(a.t, value, msgAndArgs...) +} + +// NotEqual asserts that the specified values are NOT equal. 
+// +// require.NotEqual(obj1, obj2, "two objects shouldn't be equal") +func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) { + NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// Contains asserts that the specified string contains the specified substring. +// +// require.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) { + Contains(a.t, s, contains, msgAndArgs...) +} + +// NotContains asserts that the specified string does NOT contain the specified substring. +// +// require.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) { + NotContains(a.t, s, contains, msgAndArgs...) +} + +// Uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + Condition(a.t, comp, msgAndArgs...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// require.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + Panics(a.t, f, msgAndArgs...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// require.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + NotPanics(a.t, f, msgAndArgs...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// require.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// require.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if require.NoError(err) { +// require.Equal(actualObj, expectedObj) +// } +func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) { + NoError(a.t, theError, msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if require.Error(err, "An error was expected") { +// require.Equal(err, expectedError) +// } +func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) { + Error(a.t, theError, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
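What distinguishes these forwarders from their assert counterparts is the failure mode: every requirement calls t.FailNow() when unmet, so a test stops at the first broken precondition. A sketch of the resulting style, assuming a hypothetical config loader defined inline:

package require_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical code under test.
type config struct{ Name string }

func loadConfig(path string) (*config, error) {
	if path == "" {
		return nil, errors.New("empty path")
	}
	return &config{Name: "app"}, nil
}

func TestLoadConfig(t *testing.T) {
	r := require.New(t)

	cfg, err := loadConfig("testdata/app.json")
	r.NoError(err)           // reports and calls t.FailNow() on failure
	r.NotNil(cfg)            // never reached if NoError failed
	r.Equal("app", cfg.Name) // safe to dereference by this point
}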
+// +// actualObj, err := SomeFunction() +// if require.Error(err, "An error was expected") { +// require.Equal(err, expectedError) +// } +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + EqualError(a.t, theError, errString, msgAndArgs...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + Regexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + NotRegexp(a.t, rx, str, msgAndArgs...) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements_test.go new file mode 100644 index 00000000..02be2916 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/forward_requirements_test.go @@ -0,0 +1,260 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + require := New(t) + + require.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsTypeWrapper(t *testing.T) { + require := New(t) + require.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualWrapper(t *testing.T) { + require := New(t) + require.Equal(1, 1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Equal(1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEqualWrapper(t *testing.T) { + require := New(t) + require.NotEqual(1, 2) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEqual(2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactlyWrapper(t *testing.T) { + require := New(t) + + a := float32(1) + b := float32(1) + c := float64(1) + + require.Exactly(a, b) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Exactly(a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNilWrapper(t *testing.T) { + require := New(t) + require.NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotNil(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNilWrapper(t *testing.T) { + require := New(t) + require.Nil(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Nil(new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrueWrapper(t *testing.T) { + require := New(t) + require.True(true) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.True(false) + if !mockT.Failed { + t.Error("Check 
should fail") + } +} + +func TestFalseWrapper(t *testing.T) { + require := New(t) + require.False(false) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.False(true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContainsWrapper(t *testing.T) { + require := New(t) + require.Contains("Hello World", "Hello") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Contains("Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContainsWrapper(t *testing.T) { + require := New(t) + require.NotContains("Hello World", "Hello!") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotContains("Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanicsWrapper(t *testing.T) { + require := New(t) + require.Panics(func() { + panic("Panic!") + }) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Panics(func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanicsWrapper(t *testing.T) { + require := New(t) + require.NotPanics(func() {}) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotPanics(func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoErrorWrapper(t *testing.T) { + require := New(t) + require.NoError(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NoError(errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestErrorWrapper(t *testing.T) { + require := New(t) + require.Error(errors.New("some error")) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Error(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualErrorWrapper(t *testing.T) { + require := New(t) + require.EqualError(errors.New("some error"), "some error") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.EqualError(errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmptyWrapper(t *testing.T) { + require := New(t) + require.Empty("") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Empty("x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmptyWrapper(t *testing.T) { + require := New(t) + require.NotEmpty("x") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEmpty("") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDurationWrapper(t *testing.T) { + require := New(t) + a := time.Now() + b := a.Add(10 * time.Second) + + require.WithinDuration(a, b, 15*time.Second) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.WithinDuration(a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDeltaWrapper(t *testing.T) { + require := New(t) + require.InDelta(1.001, 1, 0.01) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.InDelta(1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go b/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go new file mode 100644 index 00000000..122a3f3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go @@ -0,0 +1,271 @@ +package require + +import ( + "time" + + "github.com/stretchr/testify/assert" +) + +type TestingT interface { + Errorf(format 
string, args ...interface{}) + FailNow() +} + +// Fail reports a failure through +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + assert.Fail(t, failureMessage, msgAndArgs...) + t.FailNow() +} + +// Implements asserts that an object is implemented by the specified interface. +// +// require.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { + t.FailNow() + } +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.IsType(t, expectedType, object, msgAndArgs...) { + t.FailNow() + } +} + +// Equal asserts that two objects are equal. +// +// require.Equal(t, 123, 123, "123 and 123 should be equal") +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Equal(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// EqualValues asserts that two objects are equal or convertable to each other. +// +// require.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { + if !assert.EqualValues(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Exactly asserts that two objects are equal is value and type. +// +// require.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Exactly(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// NotNil asserts that the specified object is not nil. +// +// require.NotNil(t, err, "err should be something") +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotNil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Nil asserts that the specified object is nil. +// +// require.Nil(t, err, "err should be nothing") +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Nil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// require.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Empty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// require.NotEmpty(t, obj) +// require.Equal(t, "one", obj[0]) +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotEmpty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// require.Len(t, mySlice, 3, "The size of slice is not 3") +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if !assert.Len(t, object, length, msgAndArgs...) { + t.FailNow() + } +} + +// True asserts that the specified value is true. +// +// require.True(t, myBool, "myBool should be true") +func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.True(t, value, msgAndArgs...) 
{ + t.FailNow() + } +} + +// False asserts that the specified value is true. +// +// require.False(t, myBool, "myBool should be false") +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.False(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// NotEqual asserts that the specified values are NOT equal. +// +// require.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { + if !assert.NotEqual(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Contains asserts that the specified string contains the specified substring. +// +// require.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) { + if !assert.Contains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// NotContains asserts that the specified string does NOT contain the specified substring. +// +// require.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) { + if !assert.NotContains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if !assert.Condition(t, comp, msgAndArgs...) { + t.FailNow() + } +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// require.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.Panics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// require.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.NotPanics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// require.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// Regexp asserts that a specified regexp matches a string. +// +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.Regexp(t, rx, str, msgAndArgs...) 
{ + t.FailNow() + } +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.NotRegexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// require.NoError(t, err) +// require.Equal(t, actualObj, expectedObj) +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.NoError(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// require.Error(t, err, "An error was expected") +// require.Equal(t, err, expectedError) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.Error(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// require.Error(t, err, "An error was expected") +// require.Equal(t, err, expectedError) +// } +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if !assert.EqualError(t, theError, errString, msgAndArgs...) { + t.FailNow() + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements_test.go new file mode 100644 index 00000000..9131b2f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements_test.go @@ -0,0 +1,266 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +type MockT struct { + Failed bool +} + +func (t *MockT) FailNow() { + t.Failed = true +} + +func (t *MockT) Errorf(format string, args ...interface{}) { + _, _ = format, args +} + +func TestImplements(t *testing.T) { + + Implements(t, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsType(t *testing.T) { + + IsType(t, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqual(t *testing.T) { + + Equal(t, 1, 1) + + mockT := new(MockT) + Equal(mockT, 1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } + +} + +func TestNotEqual(t *testing.T) { + + NotEqual(t, 1, 2) + mockT := 
new(MockT) + NotEqual(mockT, 2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactly(t *testing.T) { + + a := float32(1) + b := float32(1) + c := float64(1) + + Exactly(t, a, b) + + mockT := new(MockT) + Exactly(mockT, a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNil(t *testing.T) { + + NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + NotNil(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNil(t *testing.T) { + + Nil(t, nil) + + mockT := new(MockT) + Nil(mockT, new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrue(t *testing.T) { + + True(t, true) + + mockT := new(MockT) + True(mockT, false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalse(t *testing.T) { + + False(t, false) + + mockT := new(MockT) + False(mockT, true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContains(t *testing.T) { + + Contains(t, "Hello World", "Hello") + + mockT := new(MockT) + Contains(mockT, "Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContains(t *testing.T) { + + NotContains(t, "Hello World", "Hello!") + + mockT := new(MockT) + NotContains(mockT, "Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanics(t *testing.T) { + + Panics(t, func() { + panic("Panic!") + }) + + mockT := new(MockT) + Panics(mockT, func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanics(t *testing.T) { + + NotPanics(t, func() {}) + + mockT := new(MockT) + NotPanics(mockT, func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoError(t *testing.T) { + + NoError(t, nil) + + mockT := new(MockT) + NoError(mockT, errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestError(t *testing.T) { + + Error(t, errors.New("some error")) + + mockT := new(MockT) + Error(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualError(t *testing.T) { + + EqualError(t, errors.New("some error"), "some error") + + mockT := new(MockT) + EqualError(mockT, errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmpty(t *testing.T) { + + Empty(t, "") + + mockT := new(MockT) + Empty(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmpty(t *testing.T) { + + NotEmpty(t, "x") + + mockT := new(MockT) + NotEmpty(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDuration(t *testing.T) { + + a := time.Now() + b := a.Add(10 * time.Second) + + WithinDuration(t, a, b, 15*time.Second) + + mockT := new(MockT) + WithinDuration(mockT, a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDelta(t *testing.T) { + + InDelta(t, 1.001, 1, 0.01) + + mockT := new(MockT) + InDelta(mockT, 1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 00000000..741eeb13 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,892 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). 
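Because Terminal only needs an io.ReadWriter, it can sit directly on an SSH channel or, as in the tests later in this patch, on a scripted in-memory connection. A sketch using an invented transcript type, modeled on the tests' MockTerminal:

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/ssh/terminal"
)

// transcript scripts the Read side and records the Write side.
type transcript struct {
	in  []byte
	out []byte
}

func (c *transcript) Read(p []byte) (int, error) {
	if len(c.in) == 0 {
		return 0, io.EOF
	}
	n := copy(p, c.in)
	c.in = c.in[n:]
	return n, nil
}

func (c *transcript) Write(p []byte) (int, error) {
	c.out = append(c.out, p...)
	return len(p), nil
}

func main() {
	// "\r" ends the line, matching the keyEnter handling below.
	c := &transcript{in: []byte("ls -la\r")}
	term := terminal.NewTerminal(c, "> ")

	line, err := term.ReadLine()
	fmt.Printf("line=%q err=%v\n", line, err) // line="ls -la" err=<nil>
}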
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} +var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
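bytesToKey is unexported, but its CSI handling is easy to restate in isolation. The sketch below re-implements just the arrow-key branch, a three-byte ESC [ A..D sequence, and is illustrative only, not part of the patch:

package main

import "fmt"

// decodeArrow mirrors bytesToKey's arrow-key case: on a match it
// returns the key plus the unconsumed remainder of the input.
func decodeArrow(b []byte) (key string, rest []byte) {
	if len(b) >= 3 && b[0] == 0x1b && b[1] == '[' {
		switch b[2] {
		case 'A':
			return "up", b[3:]
		case 'B':
			return "down", b[3:]
		case 'C':
			return "right", b[3:]
		case 'D':
			return "left", b[3:]
		}
	}
	return "", b // unknown or partial sequence: caller must buffer it
}

func main() {
	key, rest := decodeArrow([]byte("\x1b[Cab"))
	fmt.Println(key, string(rest)) // right ab
}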
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
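moveCursorToPos above reduces cursor placement to a single div/mod: the logical position, offset by the prompt's visible width (computed by visualLength, defined next), maps to a screen column and row. Restated standalone, assuming a fixed-width terminal:

package main

import "fmt"

// screenPos restates the arithmetic at the top of moveCursorToPos.
func screenPos(promptWidth, pos, termWidth int) (x, y int) {
	n := promptWidth + pos
	return n % termWidth, n / termWidth
}

func main() {
	// A "> " prompt (2 glyphs), cursor at rune 100, 80 columns wide:
	x, y := screenPos(2, 100, 80)
	fmt.Println(x, y) // 22 1 — column 22 on the second screen row
}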
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
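In handleKey's default branch above, every printable key is first offered to AutoCompleteCallback before being inserted by addKeyToLine below. A sketch wiring in tab completion; the command table is invented for illustration:

package shellui

import (
	"strings"

	"golang.org/x/crypto/ssh/terminal"
)

// installCompletion follows the callback contract documented on the
// Terminal struct: ok=false lets the key be processed normally,
// ok=true replaces the whole line and repositions the cursor.
func installCompletion(t *terminal.Terminal, commands []string) {
	t.AutoCompleteCallback = func(line string, pos int, key rune) (string, int, bool) {
		if key != '\t' {
			return "", 0, false
		}
		for _, c := range commands {
			if strings.HasPrefix(c, line[:pos]) {
				return c, len(c), true // complete and move cursor to end
			}
		}
		return "", 0, false
	}
}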
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return t.c.Write(buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = t.c.Write(buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
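ReadPassword above swaps the prompt and turns off echo for a single line, which is exactly what its test later in this patch verifies: the secret never reaches the output stream. A sketch, reusing the scripted-connection pattern from the earlier NewTerminal example:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/ssh/terminal"
)

type script struct{ in, out []byte }

func (s *script) Read(p []byte) (int, error) {
	if len(s.in) == 0 {
		return 0, io.EOF
	}
	n := copy(p, s.in)
	s.in = s.in[n:]
	return n, nil
}

func (s *script) Write(p []byte) (int, error) {
	s.out = append(s.out, p...)
	return len(p), nil
}

func main() {
	c := &script{in: []byte("hunter2\r")}
	term := terminal.NewTerminal(c, "> ")

	pw, _ := term.ReadPassword("Password: ")
	fmt.Printf("read %q; leaked to output: %v\n",
		pw, bytes.Contains(c.out, []byte("hunter2"))) // "hunter2"; false
}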
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } + + panic("unreachable") // for Go 1.0. +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. 
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go new file mode 100644 index 00000000..a663fe41 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go @@ -0,0 +1,269 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
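SetBracketedPasteMode and ErrPasteIndicator above combine into a simple policy at the call site: pasted lines are still valid data, but they arrive with a sentinel error so callers can treat them more literally. A sketch, where run and runPasted are supplied by the application and conn is assumed to be in raw mode already:

package shellui

import (
	"io"

	"golang.org/x/crypto/ssh/terminal"
)

// serve reads lines until EOF, routing fully-pasted lines separately.
func serve(conn io.ReadWriter, run, runPasted func(string)) error {
	term := terminal.NewTerminal(conn, "> ")
	term.SetBracketedPasteMode(true)
	defer term.SetBracketedPasteMode(false)

	for {
		line, err := term.ReadLine()
		switch err {
		case nil:
			run(line)
		case terminal.ErrPasteIndicator:
			runPasted(line) // line is usable despite the non-nil err
		case io.EOF:
			return nil // Ctrl-D on an empty line
		default:
			return err
		}
	}
}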
+ +package terminal + +import ( + "io" + "testing" +) + +type MockTerminal struct { + toSend []byte + bytesPerRead int + received []byte +} + +func (c *MockTerminal) Read(data []byte) (n int, err error) { + n = len(data) + if n == 0 { + return + } + if n > len(c.toSend) { + n = len(c.toSend) + } + if n == 0 { + return 0, io.EOF + } + if c.bytesPerRead > 0 && n > c.bytesPerRead { + n = c.bytesPerRead + } + copy(data, c.toSend[:n]) + c.toSend = c.toSend[n:] + return +} + +func (c *MockTerminal) Write(data []byte) (n int, err error) { + c.received = append(c.received, data...) + return len(data), nil +} + +func TestClose(t *testing.T) { + c := &MockTerminal{} + ss := NewTerminal(c, "> ") + line, err := ss.ReadLine() + if line != "" { + t.Errorf("Expected empty line but got: %s", line) + } + if err != io.EOF { + t.Errorf("Error should have been EOF but got: %s", err) + } +} + +var keyPressTests = []struct { + in string + line string + err error + throwAwayLines int +}{ + { + err: io.EOF, + }, + { + in: "\r", + line: "", + }, + { + in: "foo\r", + line: "foo", + }, + { + in: "a\x1b[Cb\r", // right + line: "ab", + }, + { + in: "a\x1b[Db\r", // left + line: "ba", + }, + { + in: "a\177b\r", // backspace + line: "b", + }, + { + in: "\x1b[A\r", // up + }, + { + in: "\x1b[B\r", // down + }, + { + in: "line\x1b[A\x1b[B\r", // up then down + line: "line", + }, + { + in: "line1\rline2\x1b[A\r", // recall previous line. + line: "line1", + throwAwayLines: 1, + }, + { + // recall two previous lines and append. + in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r", + line: "line1xxx", + throwAwayLines: 2, + }, + { + // Ctrl-A to move to beginning of line followed by ^K to kill + // line. + in: "a b \001\013\r", + line: "", + }, + { + // Ctrl-A to move to beginning of line, Ctrl-E to move to end, + // finally ^K to kill nothing. + in: "a b \001\005\013\r", + line: "a b ", + }, + { + in: "\027\r", + line: "", + }, + { + in: "a\027\r", + line: "", + }, + { + in: "a \027\r", + line: "", + }, + { + in: "a b\027\r", + line: "a ", + }, + { + in: "a b \027\r", + line: "a ", + }, + { + in: "one two thr\x1b[D\027\r", + line: "one two r", + }, + { + in: "\013\r", + line: "", + }, + { + in: "a\013\r", + line: "a", + }, + { + in: "ab\x1b[D\013\r", + line: "a", + }, + { + in: "Ξεσκεπάζω\r", + line: "Ξεσκεπάζω", + }, + { + in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace. + line: "", + throwAwayLines: 1, + }, + { + in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter. + line: "£", + throwAwayLines: 1, + }, + { + // Ctrl-D at the end of the line should be ignored. + in: "a\004\r", + line: "a", + }, + { + // a, b, left, Ctrl-D should erase the b. + in: "ab\x1b[D\004\r", + line: "a", + }, + { + // a, b, c, d, left, left, ^U should erase to the beginning of + // the line. + in: "abcd\x1b[D\x1b[D\025\r", + line: "cd", + }, + { + // Bracketed paste mode: control sequences should be returned + // verbatim in paste mode. + in: "abc\x1b[200~de\177f\x1b[201~\177\r", + line: "abcde\177", + }, + { + // Enter in bracketed paste mode should still work. + in: "abc\x1b[200~d\refg\x1b[201~h\r", + line: "efgh", + throwAwayLines: 1, + }, + { + // Lines consisting entirely of pasted data should be indicated as such. 
+ in: "\x1b[200~a\r", + line: "a", + err: ErrPasteIndicator, + }, +} + +func TestKeyPresses(t *testing.T) { + for i, test := range keyPressTests { + for j := 1; j < len(test.in); j++ { + c := &MockTerminal{ + toSend: []byte(test.in), + bytesPerRead: j, + } + ss := NewTerminal(c, "> ") + for k := 0; k < test.throwAwayLines; k++ { + _, err := ss.ReadLine() + if err != nil { + t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err) + } + } + line, err := ss.ReadLine() + if line != test.line { + t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line) + break + } + if err != test.err { + t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err) + break + } + } + } +} + +func TestPasswordNotSaved(t *testing.T) { + c := &MockTerminal{ + toSend: []byte("password\r\x1b[A\r"), + bytesPerRead: 1, + } + ss := NewTerminal(c, "> ") + pw, _ := ss.ReadPassword("> ") + if pw != "password" { + t.Fatalf("failed to read password, got %s", pw) + } + line, _ := ss.ReadLine() + if len(line) > 0 { + t.Fatalf("password was saved in history") + } +} + +var setSizeTests = []struct { + width, height int +}{ + {40, 13}, + {80, 24}, + {132, 43}, +} + +func TestTerminalSetSize(t *testing.T) { + for _, setSize := range setSizeTests { + c := &MockTerminal{ + toSend: []byte("password\r\x1b[A\r"), + bytesPerRead: 1, + } + ss := NewTerminal(c, "> ") + ss.SetSize(setSize.width, setSize.height) + pw, _ := ss.ReadPassword("Password: ") + if pw != "password" { + t.Fatalf("failed to read password, got %s", pw) + } + if string(c.received) != "Password: \r\n" { + t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received) + } + } +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 00000000..0763c9a9 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,128 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "io" + "syscall" + "unsafe" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
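The raw-mode recipe from the package comment expands into a complete loop like the sketch below (unix-only; the echoing REPL body is illustrative). Restoring the old state on exit matters: without it the user's shell is left with echo and line editing disabled.

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

// readWriter glues stdin and stdout into the io.ReadWriter that
// NewTerminal expects.
type readWriter struct {
	io.Reader
	io.Writer
}

func main() {
	fd := int(os.Stdin.Fd())
	if !terminal.IsTerminal(fd) {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		os.Exit(1)
	}

	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer terminal.Restore(fd, oldState)

	term := terminal.NewTerminal(readWriter{os.Stdin, os.Stdout}, "> ")
	for {
		line, err := term.ReadLine()
		if err != nil {
			return // io.EOF on Ctrl-D at an empty line
		}
		fmt.Fprintf(term, "you said: %q\r\n", line)
	}
}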
+func MakeRaw(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF + newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var oldState syscall.Termios + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + defer func() { + syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_bsd.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 00000000..9c1ffd14 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
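+// Note: the BSDs name the termios read/write ioctls TIOCGETA and TIOCSETA,
+// whereas Linux uses TCGETS and TCSETS (see util_linux.go).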
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_linux.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 00000000..5883b22d --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_windows.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 00000000..2dd6c3d9 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
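+// On Windows, raw mode is achieved by clearing the ENABLE_ECHO_INPUT,
+// ENABLE_PROCESSED_INPUT, ENABLE_LINE_INPUT and ENABLE_PROCESSED_OUTPUT
+// console mode bits with SetConsoleMode.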
+func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) 
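+		// With line input enabled, the console delivers at most one line per
+		// read; a read that leaves the buffer partly empty therefore marks
+		// the end of the line.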
+ if n < len(buf) { + break + } + } + + return ret, nil +} From 598f47b68d9fa30ec2409a4c01b73ca0b13d5591 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 6 Aug 2015 00:04:46 -0700 Subject: [PATCH 5/9] Update build for docker-from-scratch --- .dockerignore | 1 + Dockerfile.base | 3 --- Makefile | 3 ++- Makefile.docker | 41 ++++++++++++++++++++----------------- scripts/copy-images | 10 +-------- scripts/docker-run.sh | 12 +++++++++-- scripts/mk-images-tar.sh | 2 +- scripts/mk-initrd.sh | 22 ++++++++++++++++---- scripts/mk-rancheros-iso.sh | 2 ++ scripts/run | 1 - 10 files changed, 57 insertions(+), 40 deletions(-) diff --git a/.dockerignore b/.dockerignore index 43c4ac91..962cb50c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,4 +7,5 @@ tmp state build dist +assets Godeps/_workspace/pkg diff --git a/Dockerfile.base b/Dockerfile.base index 3802b489..3355c155 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -18,9 +18,6 @@ RUN cd /usr/src/go/src && ./make.bash --no-clean 2>&1 ENV GOROOT /usr/src/go ENV PATH $GOROOT/bin:$PATH -RUN go clean -i net -RUN go install -tags netgo std - RUN mkdir -p /go/src /go/bin && chmod -R 777 /go ENV GOPATH /go ENV PATH /go/bin:$PATH diff --git a/Makefile b/Makefile index 1ff464ee..0f469fd9 100644 --- a/Makefile +++ b/Makefile @@ -1,15 +1,16 @@ - include common.make compile: bin/rancheros + all: clean ros-build-base build-all ros-build-base: docker build -t ros-build-base -f Dockerfile.base . + ros-build-image: docker build -t ros-build . diff --git a/Makefile.docker b/Makefile.docker index ce36c85b..2181eb13 100644 --- a/Makefile.docker +++ b/Makefile.docker @@ -1,11 +1,12 @@ -DOCKER_BINARY_URL := https://github.com/rancher/docker/releases/download/v1.7.1-ros-1/docker-1.7.1 +DOCKER_BINARY_URL := https://github.com/rancher/docker/releases/download/v1.8.0-rc2-ros/docker-1.8.0-rc2 include common.make -bin/rancheros: bin - godep go build -tags netgo -ldflags "-X github.com/rancherio/os/config.VERSION $(VERSION) -linkmode external -extldflags -static" -o $@ +bin/rancheros: + mkdir -p bin + godep go build -tags netgo -installsuffix netgo -ldflags "-X github.com/rancherio/os/config.VERSION $(VERSION) -linkmode external -extldflags -static" -o $@ strip --strip-all $@ @@ -13,44 +14,46 @@ pwd := $(shell pwd) include scripts/build-common CD := $(BUILD)/cd -assets bin $(DIST)/artifacts $(CD)/boot/isolinux: - mkdir -p $@ - -DOCKER_BINARY := $(shell basename $(DOCKER_BINARY_URL)) - -assets/$(DOCKER_BINARY): assets - cd assets && curl -OL "$(DOCKER_BINARY_URL)" - -assets/docker: assets/$(DOCKER_BINARY) - mv assets/$(DOCKER_BINARY) $@ +assets/docker: + mkdir -p assets + curl -L "$(DOCKER_BINARY_URL)" > $@ chmod +x $@ + copy-images: ./scripts/copy-images -$(DIST)/artifacts/vmlinuz: $(DIST)/artifacts copy-images + +$(DIST)/artifacts/vmlinuz: copy-images + mkdir -p $(DIST)/artifacts mv $(BUILD)/kernel/vmlinuz $@ INITRD_DIR := $(BUILD)/initrd -$(INITRD_DIR)/images.tar: bin/rancheros - FORCE_PULL=$(FORCE_PULL) INITRD_DIR=$(INITRD_DIR) ./scripts/mk-images-tar.sh + +$(BUILD)/images.tar: bin/rancheros os-config.yml + FORCE_PULL=$(FORCE_PULL) BUILD=$(BUILD) ./scripts/mk-images-tar.sh -$(DIST)/artifacts/initrd: $(DIST)/artifacts bin/rancheros assets/docker copy-images $(INITRD_DIR)/images.tar +$(DIST)/artifacts/initrd: bin/rancheros assets/docker copy-images $(BUILD)/images.tar + mkdir -p $(DIST)/artifacts INITRD_DIR=$(INITRD_DIR) ./scripts/mk-initrd.sh -$(DIST)/artifacts/rancheros.iso: $(DIST)/artifacts/initrd $(CD)/boot/isolinux + +$(DIST)/artifacts/rancheros.iso: 
$(DIST)/artifacts/initrd CD=$(CD) ./scripts/mk-rancheros-iso.sh + $(DIST)/artifacts/iso-checksums.txt: $(DIST)/artifacts/rancheros.iso ./scripts/mk-iso-checksums-txt.sh + version: @echo $(VERSION) + build-all: \ bin/rancheros \ $(DIST)/artifacts/initrd \ @@ -59,4 +62,4 @@ build-all: \ $(DIST)/artifacts/iso-checksums.txt -.PHONY: build-all version copy-images +.PHONY: build-all version copy-images os-config.yml diff --git a/scripts/copy-images b/scripts/copy-images index ab98ce6a..82e9cc4a 100755 --- a/scripts/copy-images +++ b/scripts/copy-images @@ -10,18 +10,10 @@ VERSION=${VERSION:?"VERSION not set"} BUILD=${BUILD:?"BUILD not set"} -CONTAINER_INITRDBASE=$(docker create rancher/os-initrdbase:${VERSION}) -cleanup_initrdbase() { - docker rm -v ${CONTAINER_INITRDBASE} -} -trap cleanup_initrdbase EXIT -docker cp ${CONTAINER_INITRDBASE}:/initrd ${BUILD} # copies files to ${BUILD}/initrd - - CONTAINER_KERNEL=$(docker create rancher/os-kernel:${VERSION}) cleanup_kernel() { - cleanup_initrdbase docker rm -v ${CONTAINER_KERNEL} } trap cleanup_kernel EXIT + docker cp ${CONTAINER_KERNEL}:/kernel ${BUILD} # copies files to ${BUILD}/kernel diff --git a/scripts/docker-run.sh b/scripts/docker-run.sh index c1745d0d..c3bf3b8d 100755 --- a/scripts/docker-run.sh +++ b/scripts/docker-run.sh @@ -1,5 +1,13 @@ #!/bin/bash set -e -docker rm -fv ros-build > /dev/null 2>&1 || : -exec docker run -v /var/run/docker.sock:/var/run/docker.sock --name=ros-build ros-build "$@" +DOCKER_ARGS= +if [ -n "$BIND_DIR" ]; then + if [ "$BIND_DIR" = "." ]; then + BIND_DIR=$(pwd) + fi + DOCKER_ARGS="-t -v $BIND_DIR:/go/src/github.com/rancherio/os" +fi + +docker rm -fv ros-build >/dev/null 2>&1 || true +exec docker run -i -v /var/run/docker.sock:/var/run/docker.sock $DOCKER_ARGS --name=ros-build ros-build "$@" diff --git a/scripts/mk-images-tar.sh b/scripts/mk-images-tar.sh index 0eaf161d..25b20c91 100755 --- a/scripts/mk-images-tar.sh +++ b/scripts/mk-images-tar.sh @@ -9,4 +9,4 @@ for i in `./ros c images -i os-config.yml`; do [ "${FORCE_PULL}" != "1" ] && docker inspect $i >/dev/null 2>&1 || docker pull $i; done -docker save `./ros c images -i os-config.yml` > ${INITRD_DIR}/images.tar +docker save `./ros c images -i os-config.yml` > ${BUILD}/images.tar diff --git a/scripts/mk-initrd.sh b/scripts/mk-initrd.sh index a2dd02dd..15f3db4c 100755 --- a/scripts/mk-initrd.sh +++ b/scripts/mk-initrd.sh @@ -4,8 +4,22 @@ set -ex cd $(dirname $0)/.. . scripts/build-common -mv ${BUILD}/kernel/lib ${INITRD_DIR} -mv assets/docker ${INITRD_DIR} -cp os-config.yml ${INITRD_DIR} -cp bin/rancheros ${INITRD_DIR}/init +rm -rf ${INITRD_DIR}/{usr,init} +mkdir -p ${INITRD_DIR}/usr/{bin,share/ros} + +cp -rf ${BUILD}/kernel/lib ${INITRD_DIR}/usr +cp assets/docker ${INITRD_DIR}/usr/bin/docker +cp ${BUILD}/images.tar ${INITRD_DIR}/usr/share/ros +cp os-config.yml ${INITRD_DIR}/usr/share/ros/ +cp bin/rancheros ${INITRD_DIR}/usr/bin/ros +ln -s usr/bin/ros ${INITRD_DIR}/init + +docker export $(docker create rancher/docker:1.8.0-rc2) | tar xvf - -C ${INITRD_DIR} --exclude=usr/bin/dockerlaunch \ + --exclude=usr/bin/docker \ + --exclude=usr/share/git-core \ + --exclude=usr/bin/git \ + --exclude=usr/bin/ssh \ + --exclude=usr/libexec/git-core \ + usr + cd ${INITRD_DIR} && find | cpio -H newc -o | lzma -c > ${DIST}/artifacts/initrd diff --git a/scripts/mk-rancheros-iso.sh b/scripts/mk-rancheros-iso.sh index 4b538eef..b7c288d1 100755 --- a/scripts/mk-rancheros-iso.sh +++ b/scripts/mk-rancheros-iso.sh @@ -4,6 +4,8 @@ set -ex cd $(dirname $0)/.. . 
scripts/build-common +mkdir -p ${CD}/boot/isolinux + cp ${DIST}/artifacts/initrd ${CD}/boot cp ${DIST}/artifacts/vmlinuz ${CD}/boot cp scripts/isolinux.cfg ${CD}/boot/isolinux diff --git a/scripts/run b/scripts/run index 1fd16a62..c05c4a38 100755 --- a/scripts/run +++ b/scripts/run @@ -60,7 +60,6 @@ if [ ! -d ${INITRD_TMP} ]; then else xz -dc ${INITRD} | cpio -idmv fi - rm -f init popd fi From cc5613c64cb835e6ae061d91ff5ffafc6e073961 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 6 Aug 2015 00:30:20 -0700 Subject: [PATCH 6/9] Fix os-config.yml location --- config/types.go | 2 +- os-config.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/types.go b/config/types.go index 7bc88e88..ed70320e 100644 --- a/config/types.go +++ b/config/types.go @@ -28,7 +28,7 @@ const ( SCOPE = "io.rancher.os.scope" SYSTEM = "system" - OsConfigFile = "/os-config.yml" + OsConfigFile = "/usr/share/ros/os-config.yml" CloudConfigFile = "/var/lib/rancher/conf/cloud-config.yml" CloudConfigScriptFile = "/var/lib/rancher/conf/cloud-config-script" MetaDataFile = "/var/lib/rancher/conf/metadata" diff --git a/os-config.yml b/os-config.yml index ea67bd1f..1b2706b5 100644 --- a/os-config.yml +++ b/os-config.yml @@ -271,8 +271,8 @@ rancher: - /home:/home - /opt:/opt system_docker: - args: [docker, -d, -s, overlay, -b, docker-sys, --fixed-cidr, - 172.18.42.1/16, --restart=false, -g, /var/lib/system-docker, -G, root, + args: [-d, -s, overlay, -b, docker-sys, --fixed-cidr, 172.18.42.1/16, + --restart=false, -g, /var/lib/system-docker, -G, root, -H, 'unix:///var/run/system-docker.sock', --userland-proxy=false] upgrade: url: https://releases.rancher.com/os/versions.yml @@ -280,4 +280,4 @@ rancher: user_docker: tls_args: [--tlsverify, --tlscacert=ca.pem, --tlscert=server-cert.pem, --tlskey=server-key.pem, '-H=0.0.0.0:2376'] - args: [docker, -d, -s, overlay, -G, docker, -H, 'unix:///var/run/docker.sock', --userland-proxy=false] + args: [-d, -s, overlay, -G, docker, -H, 'unix:///var/run/docker.sock', --userland-proxy=false] From 13106ed63f3a32adda004dd25d2d3538b6472601 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 6 Aug 2015 00:49:00 -0700 Subject: [PATCH 7/9] Fix sbin symlink and bootstrap services --- compose/project.go | 2 +- scripts/mk-initrd.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/compose/project.go b/compose/project.go index 08a2230f..168bd599 100644 --- a/compose/project.go +++ b/compose/project.go @@ -76,7 +76,7 @@ func newProject(name string, cfg *config.CloudConfig) (*project.Project, error) func addServices(p *project.Project, cfg *config.CloudConfig, enabled map[string]string, configs map[string]*project.ServiceConfig) { // Note: we ignore errors while loading services - for name, serviceConfig := range cfg.Rancher.Services { + for name, serviceConfig := range configs { hash := project.GetServiceHash(name, *serviceConfig) if enabled[name] == hash { diff --git a/scripts/mk-initrd.sh b/scripts/mk-initrd.sh index 15f3db4c..e85b3191 100755 --- a/scripts/mk-initrd.sh +++ b/scripts/mk-initrd.sh @@ -13,6 +13,7 @@ cp ${BUILD}/images.tar ${INITRD_DIR}/usr/share/ros cp os-config.yml ${INITRD_DIR}/usr/share/ros/ cp bin/rancheros ${INITRD_DIR}/usr/bin/ros ln -s usr/bin/ros ${INITRD_DIR}/init +ln -s bin ${INITRD_DIR}/usr/sbin docker export $(docker create rancher/docker:1.8.0-rc2) | tar xvf - -C ${INITRD_DIR} --exclude=usr/bin/dockerlaunch \ --exclude=usr/bin/docker \ From f072cff37c524180694a83890a2a4f75d5e9e9c7 Mon Sep 17 00:00:00 2001 From: Ivan 
Mikushin Date: Thu, 6 Aug 2015 15:03:29 +0500 Subject: [PATCH 8/9] cleanup after docker create --- scripts/mk-initrd.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/scripts/mk-initrd.sh b/scripts/mk-initrd.sh index e85b3191..3ba4b64f 100755 --- a/scripts/mk-initrd.sh +++ b/scripts/mk-initrd.sh @@ -15,12 +15,14 @@ cp bin/rancheros ${INITRD_DIR}/usr/bin/ros ln -s usr/bin/ros ${INITRD_DIR}/init ln -s bin ${INITRD_DIR}/usr/sbin -docker export $(docker create rancher/docker:1.8.0-rc2) | tar xvf - -C ${INITRD_DIR} --exclude=usr/bin/dockerlaunch \ - --exclude=usr/bin/docker \ - --exclude=usr/share/git-core \ - --exclude=usr/bin/git \ - --exclude=usr/bin/ssh \ - --exclude=usr/libexec/git-core \ - usr +DFS=$(docker create rancher/docker:1.8.0-rc2) +trap "docker rm -fv ${DFS}" EXIT +docker export ${DFS} | tar xvf - -C ${INITRD_DIR} --exclude=usr/bin/dockerlaunch \ + --exclude=usr/bin/docker \ + --exclude=usr/share/git-core \ + --exclude=usr/bin/git \ + --exclude=usr/bin/ssh \ + --exclude=usr/libexec/git-core \ + usr cd ${INITRD_DIR} && find | cpio -H newc -o | lzma -c > ${DIST}/artifacts/initrd From 1b327e8d7f384d3ef95abd031ab417a6086c05bb Mon Sep 17 00:00:00 2001 From: Ivan Mikushin Date: Fri, 7 Aug 2015 00:01:57 +0500 Subject: [PATCH 9/9] fix user-docker --- cmd/respawn/respawn.go | 2 ++ os-config.yml | 21 +++++---------------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/cmd/respawn/respawn.go b/cmd/respawn/respawn.go index 4902a84c..59599042 100644 --- a/cmd/respawn/respawn.go +++ b/cmd/respawn/respawn.go @@ -110,6 +110,8 @@ func execute(line string, wg *sync.WaitGroup) { args := strings.Split(line, " ") cmd := exec.Command("setsid", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr err := cmd.Start() if err != nil { diff --git a/os-config.yml b/os-config.yml index 1b2706b5..9e881c16 100644 --- a/os-config.yml +++ b/os-config.yml @@ -1,6 +1,6 @@ rancher: bootstrap: - udev: + udev-bootstrap: image: rancher/os-udev:v0.4.0-dev labels: io.rancher.os.detach: false @@ -22,7 +22,7 @@ rancher: log_driver: json-file net: none privileged: true - udev: + udev-autoformat: image: rancher/os-udev:v0.4.0-dev labels: io.rancher.os.detach: false @@ -131,7 +131,7 @@ rancher: - /usr/bin/ros:/usr/bin/respawn:ro - /usr/bin/ros:/usr/bin/system-docker:ro - /usr/bin/ros:/usr/sbin/wait-for-docker:ro - - /usr/bin/ros:/usr/sbin/dockerlaunch:ro + - /usr/bin/ros:/usr/bin/dockerlaunch:ro - /usr/bin/docker:/usr/bin/docker:ro console: image: rancher/os-console:v0.4.0-dev @@ -146,19 +146,6 @@ rancher: restart: always volumes_from: - all-volumes - docker: - image: rancher/os-docker:v0.4.0-dev - labels: - io.rancher.os.scope: system - io.rancher.os.after: network - net: host - uts: host - pid: host - ipc: host - privileged: true - restart: always - volumes_from: - - all-volumes docker-volumes: image: rancher/os-state:v0.4.0-dev labels: @@ -208,6 +195,8 @@ rancher: image: rancher/os-preload:v0.4.0-dev labels: io.rancher.os.detach: false + io.rancher.os.scope: system + io.rancher.os.after: console privileged: true volumes: - /var/run/docker.sock:/var/run/docker.sock